version (stringclasses, 21 values) | code (stringlengths, 225-174k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 10-107) | hexsha (stringlengths, 40)
---|---|---|---|---|---
1.10 | import os
import logging
import copy
from tqdm import trange
from datetime import datetime
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from utils import ema
from lib.dataset import DataLooper
from lib.sde import VPSDE
from lib.model.ddpm import DDPM
from lib.trainer import DiffusionTrainer
from lib.sampler import DiffusionSampler
def train(config, logdir, resume=True):
"""Running a training pipeline"""
# Dataset setup
datalooper = DataLooper(
config,
batch_size=config.train.batch_size,
)
# Model setup
if config.model.name.lower() == 'ddpm':
net_model = DDPM(
config.dataset.ch,
config.model.ch,
config.model.ch_mult,
config.model.attn,
config.model.num_res_blocks,
config.model.dropout,
)
else:
raise ValueError(f"Unsupported model: {config.model.name}")
ema_model = copy.deepcopy(net_model)
if config.parallel:
net_model = torch.nn.DataParallel(net_model)
ema_model = torch.nn.DataParallel(ema_model)
# SDE setup
if config.sde.name == 'VPSDE':
sde = VPSDE(
config.sde.beta_min,
config.sde.beta_max,
config.sde.N,
)
else:
raise ValueError(f"Unsupported SDE: {config.sde.name}")
# Trainer setup
trainer = DiffusionTrainer(
sde,
net_model,
config.model.pred_type,
).to(config.device)
trainer.train()
# Optimizer setup
optim = torch.optim.Adam(
net_model.parameters(),
lr=config.train.lr,
)
warmup = config.train.warmup
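# Linear warmup: the learning-rate factor grows from 0 to 1 over the first `warmup` steps, then stays at 1.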
sched = torch.optim.lr_scheduler.LambdaLR(
optim,
lr_lambda=lambda step: min(step, warmup) / warmup,
)
# Sampler setup
sampler = DiffusionSampler(
sde,
ema_model,
config.model.pred_type,
).to(config.device)
sampler.eval()
# Log setup
sample_dir = os.path.join(logdir, 'samples')
os.makedirs(sample_dir, exist_ok=True)
writer = SummaryWriter(logdir)
# Show model size
model_size = sum(p.numel() for p in net_model.parameters())
logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')
# Load checkpoint (if exists)
try:
assert resume
ckpt = torch.load(os.path.join(logdir, 'ckpt_latest.pt'))
net_model.load_state_dict(ckpt['net_model'])
ema_model.load_state_dict(ckpt['ema_model'])
optim.load_state_dict(ckpt['optimizer'])
sched.load_state_dict(ckpt['scheduler'])
init_step = ckpt['step'] + 1
logging.info(f'Checkpoint loaded! Re-start from step {init_step}.')
except Exception:
init_step = 0
logging.info(f'No checkpoint found. Start from step {init_step}.')
# Start training
with trange(init_step, config.train.total_steps, dynamic_ncols=True) as pbar:
for step in pbar:
# Train
optim.zero_grad()
x_0 = next(datalooper)
x_0 = x_0.to(config.device)
loss = trainer(x_0)
loss = loss.mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(
net_model.parameters(),
config.train.grad_clip,
)
optim.step()
sched.step()
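# Update the exponential moving average (EMA) copy of the weights; the sampler below draws from ema_model.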
ema(net_model, ema_model, config.train.ema_decay)
# Log
writer.add_scalar('loss', loss, step)
pbar.set_postfix(loss=f'{loss:.3f}')
# Sample
if config.train.sample_step > 0 and step % config.train.sample_step == 0:
xs = []
total_steps = config.eval.sample_size // config.eval.batch_size
for i in range(0, config.eval.sample_size, config.eval.batch_size):
x_T = torch.randn_like(x_0)
with torch.no_grad():
x = sampler(
x_T,
pbar,
corrector_n_steps=1,
corrector_langevin_snr=0.16,
)
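# Map samples from the [-1, 1] model range to [0, 1] before saving them as images.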
xs.append((x.detach().cpu() + 1.) / 2)
pbar.set_postfix(option=f'({i // config.eval.batch_size + 1}/{total_steps})')
xs = torch.cat(xs, dim=0)
save_image(
xs[:64],
os.path.join(sample_dir, f'sample_{step}.png'),
nrow=8,
)
# Save
if config.train.save_step > 0 and step % config.train.save_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'optimizer': optim.state_dict(),
'scheduler': sched.state_dict(),
'step': step,
}
torch.save(ckpt, os.path.join(logdir, 'ckpt_latest.pt'))
# Archive
if config.train.archive_step > 0 and step % config.train.archive_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'optimizer': optim.state_dict(),
'scheduler': sched.state_dict(),
'step': step,
}
torch.save(ckpt, os.path.join(logdir, f'ckpt_{step}.pt'))
writer.close()
def eval(config, logdir):
"""Running an evaluation pipeline"""
# Datalooper setup
eval_datalooper = DataLooper(
config,
batch_size=config.eval.batch_size,
)
sample_size = config.eval.sample_size
batch_size = config.eval.batch_size
# Model setup
if config.model.name.lower() == 'ddpm':
model = DDPM(
config.dataset.ch,
config.model.ch,
config.model.ch_mult,
config.model.attn,
config.model.num_res_blocks,
config.model.dropout,
)
else:
raise ValueError(f"Unsupported model: {config.model.name}")
if config.parallel:
model = torch.nn.DataParallel(model)
# SDE setup
if config.sde.name == 'VPSDE':
sde = VPSDE(
config.sde.beta_min,
config.sde.beta_max,
config.sde.N,
)
else:
raise ValueError(f"Unsupported SDE: {config.sde.name}")
# Sampler setup
sampler = DiffusionSampler(
sde,
model,
config.model.pred_type,
).to(config.device)
sampler.eval()
# Show model size
model_size = sum(p.numel() for p in model.parameters())
logging.info(f'Model Params : {model_size / 1024 / 1024:.2f}M')
# Load checkpoint
ckpt = torch.load(
os.path.join(logdir, 'ckpt_latest.pt'),
map_location=config.device
)
logging.info(f'Checkpoint step : {ckpt["step"]}')
model.load_state_dict(ckpt['ema_model'])
# Directory setup
eval_dir = os.path.join(logdir, 'eval')
sample_dir = os.path.join(eval_dir, 'samples')
os.makedirs(eval_dir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
xs = []
x_0 = next(eval_datalooper).to(config.device)
with trange(0, sample_size, batch_size, dynamic_ncols=True) as pbar:
for _ in pbar:
x_T = torch.randn_like(x_0)
with torch.no_grad():
x = sampler(
x_T,
pbar,
corrector_n_steps=3,
corrector_langevin_snr=0.16,
)
xs.append((x.detach().cpu() + 1.) / 2)
xs = torch.cat(xs, dim=0)
now = datetime.now()
save_image(
xs[:64],
os.path.join(sample_dir, f'samples_{now}.png'),
nrow=8,
) | [
"torch.cat",
"torch.no_grad",
"torch.randn_like",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.DataParallel"
] | 1.10.0 | NCIA-Diffusion/ScoreSDE | b5a562908daf66e6dcf0b791beb83f1fcb61174b |
1.0 | import logging
import os
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from os.path import join
from typing import Iterable, List, Optional, Tuple, Union
import torch
from torch import nn
from .composition import AdapterCompositionBlock, Fuse, Stack, parse_composition
from .configuration import AdapterConfig, AdapterConfigBase, AdapterFusionConfig, get_adapter_config_hash
from .context import AdapterSetup, ForwardContext
from .hub_mixin import PushAdapterToHubMixin
from .layer import AdapterLayer, AdapterLayerBase
from .loading import AdapterFusionLoader, AdapterLoader, PredictionHeadLoader, WeightsLoader
from .modeling import Adapter, GLOWCouplingBlock, NICECouplingBlock
from .prefix_tuning import PrefixTuningPool, PrefixTuningShim
from .utils import EMBEDDING_FILE, TOKENIZER_PATH, inherit_doc
from .wrappers.configuration import wrap_config
logger = logging.getLogger(__name__)
class InvertibleAdaptersMixin:
"""Mixin for Transformer models adding invertible adapters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.invertible_adapters = nn.ModuleDict(dict())
# Make sure config is wrapped
self.config = wrap_config(self.config)
def add_invertible_adapter(self, adapter_name: str):
"""
Adds an invertible adapter module for the adapter with the given name. If the given adapter does not specify an
invertible adapter config, this method does nothing.
Args:
adapter_name (str): The name of the adapter for which to add an invertible adapter module.
"""
if adapter_name in self.invertible_adapters:
raise ValueError(f"Model already contains an adapter module for '{adapter_name}'.")
adapter_config = self.config.adapters.match(
adapter_name,
config_type=AdapterConfig,
location_key="inv_adapter",
)
if adapter_config and adapter_config["inv_adapter"]:
if adapter_config["inv_adapter"] == "nice":
inv_adap = NICECouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
elif adapter_config["inv_adapter"] == "glow":
inv_adap = GLOWCouplingBlock(
[[self.config.hidden_size]],
non_linearity=adapter_config["non_linearity"],
reduction_factor=adapter_config["inv_adapter_reduction_factor"],
)
else:
raise ValueError(f"Invalid invertible adapter type '{adapter_config['inv_adapter']}'.")
self.invertible_adapters[adapter_name] = inv_adap
self.invertible_adapters[adapter_name].apply(Adapter.init_bert_weights)
def delete_invertible_adapter(self, adapter_name: str):
if adapter_name in self.invertible_adapters:
del self.invertible_adapters[adapter_name]
def get_invertible_adapter(self):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
return self.invertible_adapters[first_adapter]
return None
def enable_invertible_adapters(self, adapter_names):
for adapter_name in adapter_names:
if adapter_name in self.invertible_adapters:
for param in self.invertible_adapters[adapter_name].parameters():
param.requires_grad = True
def invertible_adapters_forward(self, hidden_states, rev=False):
# TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
if self.config.adapters.active_setup is not None and len(self.config.adapters.active_setup) > 0:
first_adapter = self.config.adapters.active_setup.first()
if first_adapter in self.invertible_adapters:
hidden_states = self.invertible_adapters[first_adapter](hidden_states, rev=rev)
return hidden_states
class ModelAdaptersMixin(PushAdapterToHubMixin, ABC):
"""Mixin for transformer models adding support for loading/ saving adapters."""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
if config.name_or_path and not os.path.exists(config.name_or_path):
self.model_name = config.name_or_path
else:
self.model_name = None
self.loaded_embeddings = {}
self.shared_parameters = nn.ModuleDict()
self._active_embedding = "default"
# Make sure config is wrapped
self.config = wrap_config(self.config)
def _link_prefix_to_pool(self, layer):
if isinstance(layer, PrefixTuningShim):
layer.set_pool(self.base_model.prefix_tuning)
def _init_adapter_modules(self, add_prefix_tuning_pool=True):
"""
This method initializes adapter modules and fusion modules from the model config.
"""
# Link all prefix tunings
if add_prefix_tuning_pool:
self.base_model.prefix_tuning = PrefixTuningPool(self.config)
self.apply_to_adapter_layers(lambda i, layer: self._link_prefix_to_pool(layer))
# Initialize adapters from config
for adapter_name in self.config.adapters:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# Initialize fusion from config
for fusion_name in self.config.adapters.fusions:
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(fusion_name))
self.loaded_embeddings["default"] = self.get_input_embeddings()
# These methods have to be implemented by every deriving class:
@abstractmethod
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
This abstract method has to be implemented by every deriving class.
"""
pass
def apply_to_adapter_layers(self, fn):
"""
Applies a function to all adapter layers of the model.
"""
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
fn(i, module)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""Sets the model into mode for training the given adapters."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, True, False))
for adapter_name in adapter_setup:
if adapter_name in self.shared_parameters:
for param in self.shared_parameters[adapter_name].values():
param.requires_grad = True
if isinstance(self, InvertibleAdaptersMixin):
self.enable_invertible_adapters(adapter_setup.flatten())
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
if train_embeddings:
self.get_input_embeddings().train()
def train_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
warnings.warn(
"train_fusion() has been deprecated in favor of train_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
self.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.apply_to_adapter_layers(lambda i, layer: layer.enable_adapters(adapter_setup, unfreeze_adapters, True))
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
# TODO implement fusion for invertible adapters
def has_adapters(self):
if not getattr(self.config, "is_adaptable", None):
return False
return len(self.config.adapters.adapters) > 0
@property
def has_parallel_adapters(self) -> bool:
if self.config.adapters.active_setup:
return self.config.adapters.active_setup.parallel_channels > 1
else:
return False
@property
def active_adapters(self) -> AdapterCompositionBlock:
return self.config.adapters.active_setup
@active_adapters.setter
def active_adapters(self, adapter_setup: Union[list, AdapterCompositionBlock]):
self.set_active_adapters(adapter_setup)
def set_shared_parameters(self, param):
self.shared_parameters = param
def set_active_adapters(
self, adapter_setup: Union[list, AdapterCompositionBlock], skip_layers: Optional[List[int]] = None
):
"""
Sets the adapter modules to be used by default in every forward pass. If no adapter with the given name is
found, no module of the respective type will be activated.
Args:
adapter_setup (list):
The list of adapters to be activated by default. Can be a fusion or stacking configuration.
"""
adapter_setup = parse_composition(adapter_setup, model_type=self.config.model_type)
if adapter_setup:
for adapter_name in adapter_setup.flatten():
if adapter_name not in self.config.adapters.adapters:
raise ValueError(
f"No adapter with name '{adapter_name}' found. Please make sure that all specified adapters are correctly loaded."
)
self.config.adapters.active_setup = adapter_setup
self.config.adapters.skip_layers = skip_layers
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict or AdapterConfigBase, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional): Overwrite an adapter with the same name if it exists. By default (False), an
exception is thrown.
set_active (bool, optional): Set the adapter to be the active one. By default (False), the adapter is added but
not activated.
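Example (illustrative sketch; "my_adapter" is a placeholder name and "pfeiffer" is assumed to be one of the
pre-defined configuration identifiers):
model.add_adapter("my_adapter", config="pfeiffer", set_active=True)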
"""
if isinstance(config, dict):
config = AdapterConfigBase.load(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and adapter_name in self.config.adapters:
self.delete_adapter(adapter_name)
self.config.adapters.add(adapter_name, config=config)
try:
self.apply_to_adapter_layers(lambda i, layer: layer.add_adapter(adapter_name, i))
# PHM Layer
if self.config.adapters.match(adapter_name, AdapterConfig, location_key="phm_layer"):
self._add_shared_parameters(adapter_name, config)
# Prefix Tuning
for module in self.modules():
if isinstance(module, PrefixTuningPool):
module.confirm_prefix(adapter_name)
if isinstance(self, InvertibleAdaptersMixin):
self.add_invertible_adapter(adapter_name)
except ValueError as ex:
self.delete_adapter(adapter_name)
raise ex
if set_active:
self.set_active_adapters(adapter_name)
def _add_shared_parameters(self, adapter_name, adapter_config: AdapterConfig):
self.shared_parameters[adapter_name] = (
list(self.get_adapter(adapter_name)[0].values())[0].adapter_down[0].init_shared_parameters()
)
def add_fusion(self, adapter_names: Union[Fuse, list], adapter_fusion_config=None, override_kwargs=None):
warnings.warn(
"add_fusion() has been deprecated in favor of add_adapter_fusion(). Please use the newer method instead.",
FutureWarning,
)
adapter_fusion_config = AdapterFusionConfig.from_dict(adapter_fusion_config).replace(**override_kwargs)
self.add_adapter_fusion(adapter_names, adapter_fusion_config)
def add_adapter_fusion(
self,
adapter_names: Union[Fuse, list, str],
config=None,
overwrite_ok: bool = False,
set_active: bool = False,
):
"""
Adds AdapterFusion to the model with all the necessary configurations and weight initializations.
Args:
adapter_names (Fuse or list or str): AdapterFusion layer to add. Can be either:
- a ``Fuse`` composition block
- a list of adapter names to fuse
- a comma-separated string of adapter names to fuse
config (str or dict): adapter fusion configuration, can be either:
- a string identifying a pre-defined adapter fusion configuration
- a dictionary representing the adapter fusion configuration
- the path to a file containing the adapter fusion configuration
overwrite_ok (bool, optional):
Overwrite an AdapterFusion layer with the same name if it exists. By default (False), an exception is
thrown.
set_active (bool, optional):
Activate the added AdapterFusion. By default (False), the AdapterFusion is added but not activated.
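Example (illustrative sketch; assumes adapters "a" and "b" were already added to the model):
model.add_adapter_fusion(Fuse("a", "b"), set_active=True)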
"""
if isinstance(adapter_names, Fuse):
adapter_names = adapter_names.children
elif isinstance(adapter_names, str):
adapter_names = adapter_names.split(",")
if isinstance(config, dict):
config = AdapterFusionConfig.from_dict(config) # ensure config is ok and up-to-date
# In case adapter already exists and we allow overwriting, explicitly delete the existing one first
if overwrite_ok and self.config.adapters.get_fusion(adapter_names) is not None:
self.delete_adapter_fusion(adapter_names)
self.config.adapters.add_fusion(adapter_names, config=config)
self.apply_to_adapter_layers(lambda i, layer: layer.add_fusion_layer(adapter_names))
if set_active:
if not isinstance(adapter_names, list):
adapter_names = adapter_names.split(",")
self.set_active_adapters(Fuse(*adapter_names))
def delete_adapter(self, adapter_name: str):
"""
Deletes the adapter with the specified name from the model.
Args:
adapter_name (str): The name of the adapter.
"""
if adapter_name not in self.config.adapters:
logger.info("No adapter '%s' found for deletion. Skipping.", adapter_name)
return
del self.config.adapters.adapters[adapter_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_adapter(adapter_name))
if isinstance(self, InvertibleAdaptersMixin):
self.delete_invertible_adapter(adapter_name)
# Reset active adapters if this was the only active adapter
if self.active_adapters == Stack(adapter_name):
self.active_adapters = None
def delete_adapter_fusion(self, adapter_names: Union[Fuse, list, str]):
"""
Deletes the AdapterFusion layer of the specified adapters.
Args:
adapter_names (Union[Fuse, list, str]): AdapterFusion layer to delete.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
if adapter_fusion_name not in self.config.adapters.fusions:
logger.info("No AdapterFusion '%s' found for deletion. Skipping.", adapter_fusion_name)
return
del self.config.adapters.fusions[adapter_fusion_name]
self.apply_to_adapter_layers(lambda i, layer: layer.delete_fusion_layer(adapter_fusion_name))
# Reset active adapters if this was the active setup
if self.active_adapters == adapter_names:
self.active_adapters = None
def save_adapter(
self,
save_directory: str,
adapter_name: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an adapter and its configuration file to a directory so that it can be shared or reloaded using
`load_adapter()`.
Args:
save_directory (str): Path to a directory where the adapter should be saved.
adapter_name (str): Name of the adapter to be saved.
Raises:
ValueError: If the given adapter name is invalid.
"""
loader = AdapterLoader(self)
loader.save(save_directory, adapter_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_name)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
if isinstance(adapter_names, Fuse):
adapter_fusion_name = ",".join(adapter_names.children)
elif isinstance(adapter_names, list):
adapter_fusion_name = ",".join(adapter_names)
elif isinstance(adapter_names, str):
adapter_fusion_name = adapter_names
else:
raise ValueError("Invalid AdapterFusion definition: {}".format(adapter_names))
loader = AdapterFusionLoader(self)
loader.save(save_directory, adapter_fusion_name, meta_dict)
# save additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.save(save_directory, adapter_fusion_name)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained pytorch adapter module from the local file system or a remote location.
Args:
adapter_name_or_path (str): can be either:
- the identifier of a pre-trained task adapter to be loaded from Adapter Hub
- a path to a directory containing adapter weights saved using `model.saved_adapter()`
- a URL pointing to a zip folder containing a saved adapter module
config (dict or str, optional): The requested configuration of the adapter.
If not specified, will be either:
- the default adapter config for the requested adapter if specified
- the global default adapter config
version (str, optional): The version of the adapter to be loaded.
model_name (str, optional): The string identifier of the pre-trained model.
load_as (str, optional): Load the adapter using this name. By default, the name with which the adapter was
saved will be used.
source (str, optional): Identifier of the source(s) from where to load the adapter. Can be:
- "ah" (default): search on AdapterHub.
- "hf": search on HuggingFace model hub.
- None: search on all sources
leave_out: Dynamically drop adapter modules in the specified Transformer layers when loading the adapter.
set_active (bool, optional):
Set the loaded adapter to be the active one. By default (False), the adapter is loaded but not
activated.
Returns:
str: The name with which the adapter was added to the model.
"""
loader = AdapterLoader(self)
load_dir, load_name = loader.load(
adapter_name_or_path,
config,
version,
model_name,
load_as,
source=source,
leave_out=leave_out,
set_active=set_active,
**kwargs,
)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
id2label=id2label,
set_active=set_active,
)
return load_name
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
**kwargs
) -> str:
"""
Loads a pre-trained AdapterFusion layer from the local file system.
Args:
adapter_fusion_name_or_path (str):
a path to a directory containing AdapterFusion weights saved using `model.save_adapter_fusion()`.
load_as (str, optional): Load the AdapterFusion using this name.
By default, the name with which the AdapterFusion layer was saved will be used.
set_active (bool, optional):
Activate the loaded AdapterFusion. By default (False), the AdapterFusion is loaded but not activated.
Returns:
str: The name with which the AdapterFusion was added to the model.
"""
loader = AdapterFusionLoader(self)
load_dir, load_name = loader.load(adapter_fusion_name_or_path, load_as, set_active=set_active)
# load additional custom weights
if custom_weights_loaders:
for weights_loader in custom_weights_loaders:
weights_loader.load(
load_dir,
load_as=load_as,
loading_info=kwargs.get("loading_info", None),
main_load_name=load_name,
set_active=set_active,
)
return load_name
def save_all_adapters(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all adapters of this model together with their configuration to subfolders of the given location.
Args:
save_directory (str): Path to a directory where the adapters should be saved.
"""
for name in self.config.adapters:
adapter_config = self.config.adapters.get(name)
h = get_adapter_config_hash(adapter_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter(save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders)
def save_all_adapter_fusions(
self,
save_directory: str,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
"""
Saves all AdapterFusion layers of this model together with their configuration to subfolders of the given
location.
Args:
save_directory (str): Path to a directory where the AdapterFusion layers should be saved.
"""
for name in self.config.adapters.fusions:
adapter_fusion_config = self.config.adapters.get_fusion(name)
h = get_adapter_config_hash(adapter_fusion_config)
save_path = join(save_directory, name)
if meta_dict:
meta_dict.update({"config_id": h})
else:
meta_dict = {"config_id": h}
self.save_adapter_fusion(
save_path, name, meta_dict=meta_dict, custom_weights_loaders=custom_weights_loaders
)
def freeze_model(self, freeze=True):
"""Freezes all weights of the model."""
# first freeze/ unfreeze all model weights
for param in self.base_model.parameters():
param.requires_grad = not freeze
self.model_frozen = freeze
def forward_context(self, context: ForwardContext, *args, **kwargs):
"""
This method is called by the ``ForwardContext`` at the beginning of the forward pass.
"""
# some warnings if we don't use available adapters
active_adapters = getattr(self, "active_adapters", None) or AdapterSetup.get_context()
if not active_adapters:
if self.has_adapters():
logger.warning("There are adapters available but none are activated for the forward pass.")
return
context.adapters_parallelized = False
# Add the shared parameters for the active adapters to the context
context.shared_parameters = {
name: param for name, param in self.shared_parameters.items() if name in active_adapters.flatten()
}
# Prefix tuning
input_tensor = kwargs.get("input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("decoder_input_ids", None)
if input_tensor is None:
input_tensor = kwargs.get("attention_mask", None)
if input_tensor is None:
input_tensor = args[0]
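# The prefix-tuning pool only needs the batch size, taken from whichever input tensor is available.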
context.prefix_states = self.base_model.prefix_tuning(input_tensor.shape[0])
def load_embeddings(self, path: str, name: str):
"""
Load a saved embedding from the given path. If the embedding was saved with a tokenizer, the tokenizer is returned as well.
Args:
path: the path to the saved embedding
name: the name the embedding should be loaded as
Returns: a tokenizer if it was saved with the embedding, otherwise None
"""
from ..models.auto.tokenization_auto import AutoTokenizer
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
tokenizer = None
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
if os.path.isdir(tokenizer_path):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
embedding_path = os.path.join(path, EMBEDDING_FILE)
if not os.path.isfile(embedding_path):
raise FileNotFoundError("No embeddings found at {}".format(embedding_path))
weights = torch.load(embedding_path)
self.loaded_embeddings[name] = nn.Embedding.from_pretrained(weights)
self.set_active_embeddings(name)
return tokenizer
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
"""
Add a new embedding to the model. If a reference embedding and reference tokenizer are provided, tokens present
in both tokenizers are initialized from the corresponding entries in the reference embedding.
Args:
name: the name of the embedding
tokenizer: the tokenizer determining the vocab of the embedding
reference_embedding:
the reference embedding to use for initializing the embeddings of tokens present in the newly created
embedding
reference_tokenizer: the tokenizer providing the vocab for the reference embedding
embedding_dim: the dimension of the embeddings (if None the hidden_size from the config is used)
"""
if name in self.loaded_embeddings:
raise ValueError("An embedding with the name {} already exists".format(name))
if embedding_dim is None:
embedding_dim = self.config.hidden_size
embedding = nn.Embedding(tokenizer.vocab_size, embedding_dim)
embedding.requires_grad_(False)
if (reference_embedding is not None and reference_tokenizer is None) or (
reference_tokenizer is not None and reference_embedding is None
):
raise KeyError(
"Reference embedding and reference tokenizer are required to use initialize embeddings from reference embedding"
)
if reference_embedding is not None and reference_tokenizer is not None:
tokens = set(tokenizer.get_vocab().keys()) & set(reference_tokenizer.get_vocab().keys())
reference_vocab = reference_tokenizer.get_vocab()
vocab = tokenizer.get_vocab()
for t in tokens:
idx_reference = reference_vocab[t]
idx = vocab[t]
embedding.weight[idx] = self.loaded_embeddings[reference_embedding].weight[idx_reference].clone()
embedding.train(False)
self.loaded_embeddings[name] = embedding
self.set_active_embeddings(name)
def delete_embeddings(self, name):
"""
Deletes the embedding with the given name
Args:
name: The name of the embedding that should be deleted
"""
if name not in self.loaded_embeddings:
raise ValueError("No embedding with name {}".format(name))
if self.active_embeddings == name:
logger.warning("The active embedding is deleted. Setting the default embedding as active.")
self.set_active_embeddings("default")
del self.loaded_embeddings[name]
def save_embeddings(self, path, name, tokenizer=None):
"""
Saves the embedding with the given name. If a tokenizer is passed as well the tokenizer is saved together with
the embedding.
Args:
path: The path where the embedding should be saved
name: The name of the embedding that should be saved
tokenizer: optionally a tokenizer to save with the embedding (default is None)
"""
if self.active_embeddings == name:
self.loaded_embeddings[name] = self.get_input_embeddings()
os.makedirs(path, exist_ok=True)
embedding_path = os.path.join(path, EMBEDDING_FILE)
torch.save(self.loaded_embeddings[name].weight, embedding_path)
if tokenizer:
tokenizer_path = os.path.join(path, TOKENIZER_PATH)
tokenizer.save_pretrained(tokenizer_path)
def set_active_embeddings(self, name):
"""
Sets the active embedding for the forward pass of the model
Args:
name: The name of the embedding that should be used
"""
self.loaded_embeddings[self.active_embeddings] = self.get_input_embeddings()
self.set_input_embeddings(self.loaded_embeddings[name])
self._active_embedding = name
@property
def active_embeddings(self):
return self._active_embedding
def get_fusion_regularization_loss(self):
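# Pull every AdapterFusion 'value' projection toward the identity matrix via a scaled squared-error penalty.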
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for i, layer in self.iter_layers():
for module in layer.modules():
if isinstance(module, AdapterLayer):
for _, layer_fusion in module.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
def get_adapter(self, name) -> dict:
"""
Returns a dictionary with all weights of the adapter with the specified name.
Args:
name (str): The adapter name.
Returns:
dict: A nested dictionary containing the weights of the adapter. The dictionary is structured as follows:
{<layer id>: {<module location>: <nn.Module>}}.
"""
destination = defaultdict(dict)
# use a custom index to ensure numbering is from 0 to N layers
for i, (_, layer) in enumerate(self.iter_layers()):
for module in layer.modules():
if isinstance(module, AdapterLayerBase):
adapter_module = module.get_adapter(name)
if adapter_module is not None:
destination[i][module.location_key] = adapter_module
return dict(destination)
def eject_prefix_tuning(self, name: str):
"""
Converts the prefix tuning with the given name from the reparameterized form into the flat form.
Args:
name (str): The name of the prefix tuning.
"""
for module in self.modules():
if isinstance(module, PrefixTuningPool):
if name in module.prefix_tunings:
module.prefix_tunings[name].eject()
@inherit_doc
class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):
"""
Mixin adding support for loading/saving adapters to transformer models with head(s).
"""
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._convert_to_flex_head = False
def set_shared_parameters(self, param):
self.shared_parameters = param
if self.base_model is not self:
self.base_model.shared_parameters = self.shared_parameters
def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:
"""
Iterates over all layers of the model.
"""
if self.base_model is self:
return super().iter_layers()
else:
return self.base_model.iter_layers()
def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):
"""
Adds a new adapter module of the specified type to the model.
Args:
adapter_name (str): The name of the adapter module to be added.
config (str or dict, optional): The adapter configuration, can be either:
- the string identifier of a pre-defined configuration dictionary
- a configuration dictionary specifying the full config
- if not given, the default configuration for this adapter type will be used
overwrite_ok (bool, optional):
Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.
set_active (bool, optional):
Set the adapter to be the active one. By default (False), the adapter is added but not activated.
Note: If self.base_model is self, this class must inherit from a class that implements this method to avoid
infinite recursion.
"""
if self.base_model is self:
super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
else:
self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):
"""
Sets the model into mode for training the given adapters. If self.base_model is self, this class must inherit
from a class that implements this method to avoid infinite recursion.
"""
if self.base_model is self:
super().train_adapter(adapter_setup, train_embeddings)
else:
self.base_model.train_adapter(adapter_setup, train_embeddings)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""
Sets the model into mode for training of adapter fusion determined by a list of adapter names. If self.base_model
is self, this class must inherit from a class that implements this method to avoid infinite recursion.
"""
if self.base_model is self:
super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
else:
self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)
def save_head(self, save_directory: str, head_name: str = None):
loader = PredictionHeadLoader(self)
loader.save(save_directory, name=head_name)
def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):
loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)
return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)
def save_adapter(
self,
save_directory: str,
adapter_name: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_adapter(
save_directory,
adapter_name,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def load_adapter(
self,
adapter_name_or_path: str,
config: Union[dict, str] = None,
version: str = None,
model_name: str = None,
load_as: str = None,
source: str = None,
with_head: bool = True,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
leave_out: Optional[List[int]] = None,
id2label=None,
set_active: bool = False,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(
PredictionHeadLoader(
self,
error_on_missing=False,
convert_to_flex_head=self._convert_to_flex_head,
)
)
# Support passing a num_labels for compatibility reasons. Convert to label map here.
num_labels = kwargs.pop("num_labels", None)
if num_labels is not None:
id2label = {i: "LABEL_" + str(i) for i in range(num_labels)}
return super().load_adapter(
adapter_name_or_path,
config=config,
version=version,
model_name=model_name,
load_as=load_as,
source=source,
custom_weights_loaders=custom_weights_loaders,
leave_out=leave_out,
id2label=id2label,
set_active=set_active,
**kwargs,
)
def save_all_adapters(
self,
save_directory: str,
with_head: bool = True,
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
):
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().save_all_adapters(
save_directory,
meta_dict=meta_dict,
custom_weights_loaders=custom_weights_loaders,
)
def save_adapter_fusion(
self,
save_directory: str,
adapter_names: Union[Fuse, list, str],
meta_dict: dict = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
with_head: Union[bool, str] = False,
):
"""
Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded
using `load_adapter_fusion()`.
Args:
save_directory (str): Path to a directory where the AdapterFusion should be saved.
adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.
with_head (Union[bool, str]):
If True, will save a head with the same name as the AdapterFusionLayer. If a string, this will be used
as the name of the head to be saved.
Raises:
ValueError: If the given AdapterFusion name is invalid.
"""
super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)
if with_head:
# Make sure to cover the different options for adapter_names
if isinstance(with_head, str):
head_name = with_head
elif isinstance(adapter_names, Fuse):
head_name = adapter_names.name
elif isinstance(adapter_names, list):
head_name = ",".join(adapter_names)
else:
head_name = adapter_names
if head_name not in self.heads:
raise ValueError("No head with name {} found".format(head_name))
loader = PredictionHeadLoader(self)
loader.save(save_directory, head_name)
def load_adapter_fusion(
self,
adapter_fusion_name_or_path: str,
load_as: str = None,
custom_weights_loaders: Optional[List[WeightsLoader]] = None,
set_active: bool = False,
with_head: bool = True,
**kwargs
) -> str:
if with_head:
if custom_weights_loaders is None:
custom_weights_loaders = []
custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))
super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)
def save_all_heads(self, save_directory):
for head_name in self.heads:
save_path = join(save_directory, head_name)
self.save_head(save_path, head_name)
def get_labels(self):
return list(self.config.id2label.values())
def get_labels_dict(self):
return self.config.id2label
def get_adapter(self, name):
"""
If self.base_model is self, this class must inherit from a class that implements this method to avoid infinite
recursion.
"""
if self.base_model is self:
return super().get_adapter(name)
else:
return self.base_model.get_adapter(name)
def load_embeddings(self, path: str, name: str):
if self.base_model is self:
return super().load_embeddings(path, name)
else:
return self.base_model.load_embeddings(path, name)
def save_embeddings(self, path, name, tokenizer=None):
if self.base_model is self:
return super().save_embeddings(path, name, tokenizer)
else:
return self.base_model.save_embeddings(path, name, tokenizer)
def add_embeddings(self, name, tokenizer, reference_embedding=None, reference_tokenizer=None, embedding_dim=None):
if self.base_model is self:
return super().add_embeddings(name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim)
else:
return self.base_model.add_embeddings(
name, tokenizer, reference_embedding, reference_tokenizer, embedding_dim
)
def set_active_embeddings(self, name):
if self.base_model is self:
return super().set_active_embeddings(name)
else:
return self.base_model.set_active_embeddings(name)
def delete_embeddings(self, name):
if self.base_model is self:
return super().delete_embeddings(name)
else:
return self.base_model.delete_embeddings(name)
| [
"torch.zeros",
"torch.nn.Embedding.from_pretrained",
"torch.nn.ModuleDict",
"torch.save",
"torch.load",
"torch.nn.Embedding"
] | 1.0 | rahuln/adapter-transformers | ac3284547064686d31b95e5e1b078447a2199779 |
1.3 | import _init_paths
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from trainers.adnet_train_sl import adnet_train_sl
import argparse
from options.general2 import opts
from models.ADNet import adnet
from utils.get_train_videos import get_train_videos
from trainers.adnet_train_rl import adnet_train_rl
import torch
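# Use the 'spawn' start method so worker processes can safely use CUDA (the default 'fork' does not work with an initialized CUDA context).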
torch.multiprocessing.set_start_method('spawn', force=True)
import torch.backends.cudnn as cudnn
import torch.nn as nn
import os
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='ADNet training')
# parser.add_argument('--resume', default='weights/ADNet_SL_backup.pth', type=str, help='Resume from checkpoint')
# parser.add_argument('--resume', default='weights/ADNet_RL_2epoch8_backup.pth', type=str, help='Resume from checkpoint')
# parser.add_argument('--resume', default='weights/ADNet_SL_epoch27_final.pth', type=str, help='Resume from checkpoint')
parser.add_argument('--resume', default='models/weights_mul_step3_new/ADNet_SL_.pth', type=str, help='Resume from checkpoint')
parser.add_argument('--num_workers', default=6, type=int, help='Number of workers used in dataloading')
parser.add_argument('--start_iter', default=2, type=int, help='Begin counting iterations starting from this value (should be used with resume)')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
parser.add_argument('--visualize', default=True, type=str2bool, help='Use tensorboardX for loss visualization')
parser.add_argument('--send_images_to_visualization', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')
parser.add_argument('--save_folder', default='models/weights_del', help='Location to save checkpoint models')
parser.add_argument('--tensorlogdir', default='logs/tensorboardx_log_del', help='Location to save tensorboardx_log')
parser.add_argument('--train_consecutive', default=False, type=str2bool, help='Whether to train consecutive frames')
parser.add_argument('--train_mul_step', default=False, type=str2bool, help='Whether to train multiple steps')
parser.add_argument('--save_file', default='ADNet_SL_', type=str, help='save file part of file name for SL')
parser.add_argument('--save_file_RL', default='ADNet_RL_', type=str, help='save file part of file name for RL')
parser.add_argument('--start_epoch', default=0, type=int, help='Begin counting epochs starting from this value')
parser.add_argument('--run_supervised', default=True, type=str2bool, help='Whether to run supervised learning or not')
parser.add_argument('--multidomain', default=False, type=str2bool, help='Use a separate weight set for each video (multi-domain) or not')
parser.add_argument('--save_result_images', default=False, type=str2bool, help='Whether to save the results or not. Save folder: images/')
parser.add_argument('--display_images', default=False, type=str2bool, help='Whether to display images or not')
if __name__ == "__main__":
args = parser.parse_args()
# Supervised Learning part
if args.run_supervised:
#opts['minibatch_size'] = 128
opts['minibatch_size'] = 256
# train with supervised learning
_, _, train_videos = adnet_train_sl(args, opts)
args.resume = os.path.join(args.save_folder, args.save_file) + '.pth'
# reinitialize the network with network from SL
net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True,
multidomain=args.multidomain)
args.start_epoch = 0
args.start_iter = 0
else:
assert args.resume is not None, \
"Please put result of supervised learning or reinforcement learning with --resume (filename)"
train_videos = get_train_videos(opts)
if train_videos is None:
opts['num_videos'] = 1
else:
opts['num_videos'] = len(train_videos['video_names'])
if args.start_iter == 0: # means the weight came from the SL
# net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=True, multidomain=args.multidomain)
net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False,
multidomain=args.multidomain)
else: # resume the adnet
net, domain_specific_nets = adnet(opts, trained_file=args.resume, random_initialize_domain_specific=False, multidomain=args.multidomain)
if args.cuda:
net = nn.DataParallel(net)
cudnn.benchmark = True
net = net.cuda()
# Reinforcement Learning part
#opts['minibatch_size'] = 32
opts['minibatch_size'] = 128
net = adnet_train_rl(net, domain_specific_nets, train_videos, opts, args)
| [
"torch.multiprocessing.set_start_method",
"torch.nn.DataParallel"
] | 1.3.1 | hizb-resume/LTD-local-track-to-detect-for-VID | 7147ac7c6cd4b22a956aaaabaa151e5ed5410c68 |
1.6 | import argparse
from dl_training.training import BaseTrainer
from dl_training.testing import OpenBHBTester
import torch
import logging
if __name__=="__main__":
logger = logging.getLogger("SMLvsDL")
parser = argparse.ArgumentParser()
# Data location + saving paths
parser.add_argument("--root", type=str, required=True, help="Path to data root directory")
parser.add_argument("--preproc", type=str, default='vbm', choices=['vbm', 'quasi_raw'])
parser.add_argument("--checkpoint_dir", type=str)
parser.add_argument("--exp_name", type=str, required=True)
parser.add_argument("--outfile_name", type=str, help="The output file name used to save the results in testing mode.")
parser.add_argument("--N_train_max", type=int, default=None, help="Maximum number of training samples "
"to be used per fold")
parser.add_argument("--pb", type=str, choices=["age", "sex", "scz", "bipolar", "asd", "self_supervised"])
parser.add_argument("--folds", nargs='+', type=int, help="Fold indexes to run during the training")
parser.add_argument("--nb_folds", type=int, default=5)
# Important: what model do we use
parser.add_argument("--net", type=str, help="Network to use")
# Depends on available CPU/GPU memory
parser.add_argument("-b", "--batch_size", type=int, required=True)
parser.add_argument("--nb_epochs_per_saving", type=int, default=5)
parser.add_argument("--manual_seed", type=int, help="The manual seed to give to pytorch.")
# Optimizer hyper-parameters
parser.add_argument("--lr", type=float, required=True, help="Initial learning rate")
parser.add_argument("--gamma_scheduler", type=float, required=True)
parser.add_argument("--nb_epochs", type=int, default=300)
parser.add_argument("--step_size_scheduler", type=int, default=10)
# Dataloader: set them
parser.add_argument("--num_cpu_workers", type=int, default=3, help="Number of workers assigned to do the "
"preprocessing step (used by DataLoader of Pytorch)")
parser.add_argument("--sampler", choices=["random", "weighted_random", "sequential"], required=True)
parser.add_argument("--residualize", type=str, choices=["linear", "combat"])
# Self-sypervised learning
parser.add_argument("--sigma", type=float, help="Hyper-parameter for RBF kernel in self-supervised loss.", default=5)
# Transfer Learning
parser.add_argument("--pretrained_path", type=str)
parser.add_argument("--load_optimizer", action="store_true", help="If <pretrained_path> is set, loads also the "
"optimizer's weigth")
# This code can be executed on CPU or GPU
parser.add_argument("--cuda", type=bool, default=True, help="If True, executes the code on GPU")
# Kind of tests
parser.add_argument("--train", action="store_true")
parser.add_argument("--test", action="store_true")
args = parser.parse_args()
if not torch.cuda.is_available():
args.cuda = False
logger.warning("cuda is not available and has been disabled.")
if args.manual_seed:
torch.manual_seed(args.manual_seed)
if not args.train and not args.test:
args.train = True
logger.info("No mode specify: training mode is set automatically")
if args.train:
trainer = BaseTrainer(args)
trainer.run()
# do not consider the pretrained path anymore since it will be eventually computed automatically
args.pretrained_path = None
if args.test:
tester = OpenBHBTester(args)
tester.run()
| [
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.6.0 | Duplums/SMLvsDL | b285717bd8d8e832b4bc9e2b42d18bd96b628def |
1.8 | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
class DatasetSplit(Dataset):
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = list(idxs)
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return image, label
class LocalUpdate(object):
def __init__(self, args, dataset=None, idxs=None):
self.args = args
self.loss_func = nn.CrossEntropyLoss()
self.selected_clients = []
self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True, num_workers=4)
def train(self, net, history_dict, lrT):
net.train()
# train and update
optimizer = torch.optim.SGD(net.parameters(), lr=lrT, momentum=0.9, weight_decay=5e-4)
epoch_loss = []
for epoch in range(self.args.local_ep):
batch_loss = []
for batch_idx, (images, labels) in enumerate(self.ldr_train):
images, labels = images.to(self.args.device), labels.to(self.args.device)
net.zero_grad()
log_probs = net(images)
loss = self.loss_func(log_probs, labels)
loss.backward()
optimizer.step()
batch_loss.append(loss.item())
epoch_loss.append(sum(batch_loss)/len(batch_loss))
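# Return the element-wise difference between the locally trained weights and the passed-in history_dict (i.e. the weight update), rather than the full state dict.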
current_dict = net.state_dict()
for k in current_dict.keys():
current_dict[k] -= history_dict[k]
return current_dict, sum(epoch_loss) / len(epoch_loss)
| [
"torch.nn.CrossEntropyLoss"
] | 1.8.1 | lynshao/NoisyNN | 2c827dbe697f4a8d8f9b2cb8abb2aa43a749fa16 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, it can be omitted.
# In out_prob, each entry along dim 1 is the probability that the box belongs to a given class (92 classes -> 92 values per box).
# There are far more candidate boxes than ground-truth boxes, so we do not yet know which candidate matches which gt box.
# We therefore gather all tgt_ids and read out the corresponding probabilities from out_prob, since among the candidates some box will best match each gt box.
# The minus sign measures the gap to the ideal probability of 1; whether or not the constant 1 is added makes no difference here.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
# Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
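# Split the cost matrix into per-image blocks (each image has a different number of target boxes) and solve each block with the Hungarian algorithm.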
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
| [
"torch.cat",
"torch.no_grad",
"torch.as_tensor",
"torch.cdist"
] | 1.5.0 | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 |
1.1 | import os
import pytest
import torch
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core import memory
from pytorch_lightning.trainer.distrib_parts import parse_gpu_ids, determine_root_gpu_device
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
PRETEND_N_OF_GPUS = 16
@pytest.mark.spawn
@pytest.mark.parametrize("backend", ['dp', 'ddp', 'ddp2'])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_model(tmpdir, backend):
"""Make sure DDP works."""
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend=backend,
)
model = EvalModelTemplate()
# tutils.run_model_test(trainer_options, model)
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
assert result
# test memory helper functions
memory.get_memory_profile('min_max')
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
"""Make sure DDP works with dataloaders passed to fit()"""
tutils.set_random_master_port()
trainer_options = dict(default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
train_percent_check=0.4,
val_percent_check=0.2,
gpus=[0, 1],
distributed_backend='ddp')
model = EvalModelTemplate()
fit_options = dict(train_dataloader=model.train_dataloader(),
val_dataloaders=model.val_dataloader())
trainer = Trainer(**trainer_options)
result = trainer.fit(model, **fit_options)
assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
hparams = EvalModelTemplate.get_default_hparams()
model = EvalModelTemplate(hparams)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
version = logger.version
# fit model
trainer = Trainer(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir)
)
result = trainer.fit(model)
real_global_step = trainer.global_step
    # training complete
assert result == 1, 'cpu model failed to complete'
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
model.eval()
pred_before_saving = model(x)
# test HPC saving
# simulate snapshot on slurm
saved_filepath = trainer.hpc_save(tmpdir, logger)
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_default_logger(tmpdir, version=version)
trainer = Trainer(
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir),
)
model = EvalModelTemplate(hparams)
# set the epoch start hook so we can predict before the model does the full training
def assert_pred_same():
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
trainer.model.eval()
new_pred = trainer.model(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
model.on_epoch_start = assert_pred_same
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
@pytest.mark.spawn
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_none_backend(tmpdir):
"""Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
train_percent_check=0.1,
val_percent_check=0.1,
gpus='-1'
)
model = EvalModelTemplate()
with pytest.warns(UserWarning):
tutils.run_model_test(trainer_options, model)
@pytest.fixture
def mocked_device_count(monkeypatch):
def device_count():
return PRETEND_N_OF_GPUS
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.fixture
def mocked_device_count_0(monkeypatch):
def device_count():
return 0
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
    pytest.param(0, 0, None, id="0 gpus, expect 0 gpus to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
pytest.param('-1', PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
    pytest.param(3, 3, "ddp", id="3 gpus, expect 3 gpus to use (backend:ddp)")
])
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
])
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, "ddp", id="None is None"),
    pytest.param(0, None, "ddp", id="0 gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param('-1', 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)")
])
def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
])
def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu
# Asking for a gpu when none are available will result in a MisconfigurationException
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param('-1', None, "ddp")
])
def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
with pytest.raises(MisconfigurationException):
Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu'], [
pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
    pytest.param([0], 0, id="0th gpu, expect gpu root device to be 0."),
pytest.param([1], 1, id="1st gpu, expect gpu root device to be 1."),
pytest.param([3], 3, id="3rd gpu, expect gpu root device to be 3."),
pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
])
def test_determine_root_gpu_device(gpus, expected_root_gpu):
assert determine_root_gpu_device(gpus) == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_gpu_ids'], [
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param('0', [0]),
pytest.param('3', [3]),
pytest.param('1, 3', [1, 3]),
pytest.param('2,', [2]),
pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
])
def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert parse_gpu_ids(gpus) == expected_gpu_ids
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus'], [
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(['0']),
pytest.param((0, 1)),
])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, '-1'])
def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
with pytest.raises(MisconfigurationException):
parse_gpu_ids([1, 2, 19])
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [-1, '-1'])
def test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
parse_gpu_ids(gpus)
| [
"torch.eq",
"torch.cuda.device_count"
] | 1.1 | ashwinb/pytorch-lightning | 89787947304a0db3a98a1ddd0e818a91a924e43f |
1.6 | import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
class Pooling(nn.Module):
"""Performs pooling (max or mean) on the token embeddings.
    Using pooling, it generates a fixed-size sentence embedding from a variable-length sentence. This layer also allows using the CLS token if it is returned by the underlying word embedding model.
You can concatenate multiple poolings together.
:param word_embedding_dimension: Dimensions for the word embeddings
:param pooling_mode: Can be a string: mean/max/cls. If set, overwrites the other pooling_mode_* settings
:param pooling_mode_cls_token: Use the first token (CLS token) as text representations
:param pooling_mode_max_tokens: Use max in each dimension over all tokens.
:param pooling_mode_mean_tokens: Perform mean-pooling
    :param pooling_mode_mean_sqrt_len_tokens: Perform mean-pooling, but divide by sqrt(input_length).
"""
def __init__(self,
word_embedding_dimension: int,
pooling_mode: str = None,
pooling_mode_cls_token: bool = False,
pooling_mode_max_tokens: bool = False,
pooling_mode_mean_tokens: bool = True,
pooling_mode_mean_sqrt_len_tokens: bool = False,
):
super(Pooling, self).__init__()
self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens']
if pooling_mode is not None: #Set pooling mode by string
pooling_mode = pooling_mode.lower()
assert pooling_mode in ['mean', 'max', 'cls']
pooling_mode_cls_token = (pooling_mode == 'cls')
pooling_mode_max_tokens = (pooling_mode == 'max')
pooling_mode_mean_tokens = (pooling_mode == 'mean')
self.word_embedding_dimension = word_embedding_dimension
self.pooling_mode_cls_token = pooling_mode_cls_token
self.pooling_mode_mean_tokens = pooling_mode_mean_tokens
self.pooling_mode_max_tokens = pooling_mode_max_tokens
self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens
pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])
self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)
def __repr__(self):
return "Pooling({})".format(self.get_config_dict())
def get_pooling_mode_str(self) -> str:
"""
Returns the pooling mode as string
"""
modes = []
if self.pooling_mode_cls_token:
modes.append('cls')
if self.pooling_mode_mean_tokens:
modes.append('mean')
if self.pooling_mode_max_tokens:
modes.append('max')
if self.pooling_mode_mean_sqrt_len_tokens:
modes.append('mean_sqrt_len_tokens')
return "+".join(modes)
def forward(self, features: Dict[str, Tensor]):
token_embeddings = features['token_embeddings']
attention_mask = features['attention_mask']
## Pooling strategy
output_vectors = []
if self.pooling_mode_cls_token:
cls_token = features.get('cls_token_embeddings', token_embeddings[:, 0]) # Take first token by default
output_vectors.append(cls_token)
if self.pooling_mode_max_tokens:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value
max_over_time = torch.max(token_embeddings, 1)[0]
output_vectors.append(max_over_time)
if self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens:
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
#If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present
if 'token_weights_sum' in features:
sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())
else:
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
if self.pooling_mode_mean_tokens:
output_vectors.append(sum_embeddings / sum_mask)
if self.pooling_mode_mean_sqrt_len_tokens:
output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))
output_vector = torch.cat(output_vectors, 1)
features.update({'sentence_embedding': output_vector})
return features
def get_sentence_embedding_dimension(self):
return self.pooling_output_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return Pooling(**config)
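# Hedged usage sketch (not part of the original file): runs the layer on a dummy
# batch to show the expected feature dict and output shape. The sizes (batch of 2,
# 4 tokens, 8-dim word embeddings) are illustrative assumptions.
if __name__ == "__main__":
    pooling = Pooling(word_embedding_dimension=8, pooling_mode='mean')
    features = {
        'token_embeddings': torch.randn(2, 4, 8),
        'attention_mask': torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]]),
    }
    out = pooling(features)
    # Mean pooling keeps the word embedding dimension, so the result is (2, 8).
    print(out['sentence_embedding'].shape, pooling.get_sentence_embedding_dimension())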
| [
"torch.cat",
"torch.sqrt",
"torch.max",
"torch.clamp",
"torch.sum"
] | 1.6.0 | searchsolved/sentence-transformers-master | 50f345322d602ebab9e6d2b5e2a98e7e9d0cf9a3 |
0.4 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch.nn as nn
from collections import OrderedDict
from functools import partial
from lib.models.tools.module_helper import ModuleHelper
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
class IdentityResidualBlock(nn.Module):
def __init__(self,
in_channels,
channels,
stride=1,
dilation=1,
groups=1,
bn_type=None,
dropout=None):
"""Configurable identity-mapping residual block
Parameters
----------
in_channels : int
Number of input channels.
channels : list of int
            Number of channels in the internal feature maps. Can either have two or three elements: if two, construct
            a residual block with two `3 x 3` convolutions; if three, construct a bottleneck block with `1 x 1`, then
            `3 x 3`, then `1 x 1` convolutions.
stride : int
Stride of the first `3 x 3` convolution
dilation : int
Dilation to apply to the `3 x 3` convolutions.
groups : int
Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with
bottleneck blocks.
bn_type : callable
Function to create normalization / activation Module.
dropout: callable
Function to create Dropout Module.
"""
super(IdentityResidualBlock, self).__init__()
# Check parameters for inconsistencies
if len(channels) != 2 and len(channels) != 3:
raise ValueError("channels must contain either two or three values")
if len(channels) == 2 and groups != 1:
raise ValueError("groups > 1 are only valid if len(channels) == 3")
is_bottleneck = len(channels) == 3
need_proj_conv = stride != 1 or in_channels != channels[-1]
self.bn1 = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)
if not is_bottleneck:
layers = [
("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False,
dilation=dilation)),
("bn2", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),
("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,
dilation=dilation))
]
if dropout is not None:
layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
else:
layers = [
("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),
("bn2", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),
("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,
groups=groups, dilation=dilation)),
("bn3", ModuleHelper.BNReLU(channels[1], bn_type=bn_type)),
("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))
]
if dropout is not None:
layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
self.convs = nn.Sequential(OrderedDict(layers))
if need_proj_conv:
self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)
def forward(self, x):
if hasattr(self, "proj_conv"):
bn1 = self.bn1(x)
shortcut = self.proj_conv(bn1)
else:
shortcut = x.clone()
bn1 = self.bn1(x)
out = self.convs(bn1)
out.add_(shortcut)
return out
class WiderResNetA2(nn.Module):
def __init__(self,
structure=[3, 3, 6, 3, 1, 1],
bn_type=None,
classes=0,
dilation=True):
"""Wider ResNet with pre-activation (identity mapping) blocks
This variant uses down-sampling by max-pooling in the first two blocks and by strided convolution in the others.
Parameters
----------
structure : list of int
Number of residual blocks in each of the six modules of the network.
bn_type : callable
Function to create normalization / activation Module.
classes : int
If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end
of the network.
dilation : bool
If `True` apply dilation to the last three modules and change the down-sampling factor from 32 to 8.
"""
super(WiderResNetA2, self).__init__()
self.structure = structure
self.dilation = dilation
if len(structure) != 6:
raise ValueError("Expected a structure with six values")
# Initial layers
self.mod1 = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))
]))
# Groups of residual blocks
in_channels = 64
channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048), (1024, 2048, 4096)]
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
if not dilation:
dil = 1
stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1
else:
if mod_id == 3:
dil = 2
elif mod_id > 3:
dil = 4
else:
dil = 1
stride = 2 if block_id == 0 and mod_id == 2 else 1
if mod_id == 4:
drop = None
elif mod_id == 5:
drop = None
else:
drop = None
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels, channels[mod_id], bn_type=bn_type, stride=stride, dilation=dil,
dropout=drop)
))
# Update channels and p_keep
in_channels = channels[mod_id][-1]
# Create module
if mod_id < 2:
self.add_module("pool%d" % (mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1, ceil_mode=True))
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
self.bn_out = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)
def forward(self, img):
tuple_features = list()
out = self.mod1(img)
out = self.mod2(self.pool2(out))
out = self.mod3(self.pool3(out))
out = self.mod4(out)
tuple_features.append(out)
out = self.mod5(out)
tuple_features.append(out)
out = self.mod6(out)
tuple_features.append(out)
out = self.mod7(out)
out = self.bn_out(out)
tuple_features.append(out)
return tuple_features
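# Hedged illustration (not part of the original file): standalone printout of the
# stride/dilation schedule that the constructor loop above assigns to the first
# block of each module, for both the non-dilated (down-sampling factor 32) and
# dilated (down-sampling factor 8) configurations.
if __name__ == "__main__":
    structure = [3, 3, 6, 3, 1, 1]
    for dilation in (False, True):
        print("dilation={}".format(dilation))
        for mod_id, num in enumerate(structure):
            if not dilation:
                dil = 1
                stride = 2 if 2 <= mod_id <= 4 else 1
            else:
                dil = 2 if mod_id == 3 else (4 if mod_id > 3 else 1)
                stride = 2 if mod_id == 2 else 1
            print("  mod{} block1: stride={}, dilation={}".format(mod_id + 2, stride, dil))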
| [
"torch.nn.Conv2d",
"torch.nn.MaxPool2d"
] | 0.4.1 | Xlinford/ContrastiveSeg | 79eec700d2efdaad4da8cf0c07674107e72078da |
1.7 | import json
import torch
from tqdm import tqdm
from .consts import ARGS, DEVICE, TOKENIZER
def read_data(path):
data = []
with open(path, encoding='utf8') as f:
for line in f:
line = json.loads(line)
data.append(line)
return data
def batchify(sentence_dict, phrase_list_sampled, batch_size=32):
batches = []
pointer = 0
total_num = len(phrase_list_sampled)
while pointer < total_num:
text_batch = []
span_batch = []
for data_line in phrase_list_sampled[pointer:pointer+batch_size]:
sent_id, start, end, phrase_lemma = data_line
text = sentence_dict[sent_id]
text_batch.append(text)
span_batch.append([(start, end)])
batches.append((text_batch, span_batch))
pointer += batch_size
return batches
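def _demo_batchify():
    # Hedged usage sketch (not part of the original file): shows the batch layout
    # batchify produces, using a tiny made-up sentence_dict and phrase list of
    # (sent_id, char_start, char_end, phrase_lemma) tuples. Calling it requires
    # the module-level imports above to resolve.
    sentence_dict = {0: "deep learning models need data", 1: "topic models cluster phrases"}
    phrase_list = [
        (0, 0, 13, "deep learning"),
        (1, 0, 12, "topic model"),
        (1, 21, 28, "phrase"),
    ]
    for text_batch, span_batch in batchify(sentence_dict, phrase_list, batch_size=2):
        print(text_batch, span_batch)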
def get_features(sentence_dict, phrase_list, model, return_prob=False):
all_features = []
if return_prob:
all_probs = []
for batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):
text_batch, span_batch = batch
inputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors="pt")
for k,v in inputs.items():
inputs[k] = v.to(DEVICE)
with torch.no_grad():
luke_outputs, entity_pooling = model(**inputs)
if return_prob:
model_prob = model.get_cluster_prob(entity_pooling)
all_probs.append(model_prob.detach().cpu())
all_features.append(entity_pooling.detach().cpu())
all_features = torch.cat(all_features, dim=0)
if return_prob:
all_probs = torch.cat(all_probs, dim=0)
return all_features, all_probs
return all_features
def get_probs(sentence_dict, phrase_list, model):
all_probs = []
for batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):
text_batch, span_batch = batch
inputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors="pt")
for k,v in inputs.items():
inputs[k] = v.to(DEVICE)
with torch.no_grad():
luke_outputs, entity_pooling = model(**inputs)
model_prob = model.get_cluster_prob(entity_pooling)
all_probs.append(model_prob.detach().cpu())
all_probs = torch.cat(all_probs, dim=0)
return all_probs
def get_all_phrase_bert_features(sentence_dict, phrase_list, model):
all_features = []
with torch.no_grad():
for batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):
text_batch, span_batch = batch
phrase_list = []
for text, span in zip(text_batch, span_batch):
span = span[0]
start, end = span
phrase_list.append(text[start:end])
repr_list = model.encode(phrase_list)
all_features+=list(repr_list)
all_features = torch.FloatTensor(all_features)
return all_features | [
"torch.cat",
"torch.FloatTensor",
"torch.no_grad"
] | 1.7.0 | JiachengLi1995/UCTopic | 3875f2afbf6b99dfce2d5b5cd930976049746d41 |
1.3 | """
Entry point for training and evaluating a lemmatizer.
This lemmatizer combines a neural sequence-to-sequence architecture with an `edit` classifier
and two dictionaries to produce robust lemmas from word forms.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import logging
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanza.models.lemma.data import DataLoader
from stanza.models.lemma.vocab import Vocab
from stanza.models.lemma.trainer import Trainer
from stanza.models.lemma import scorer, edit
from stanza.models.common import utils
import stanza.models.common.seq2seq_constant as constant
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
logger = logging.getLogger('stanza')
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/lemma', help='Directory for all lemma data.')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--no_dict', dest='ensemble_dict', action='store_false', help='Do not ensemble dictionary with seq2seq. By default use ensemble.')
parser.add_argument('--dict_only', action='store_true', help='Only train a dictionary-based lemmatizer.')
parser.add_argument('--hidden_dim', type=int, default=200)
parser.add_argument('--emb_dim', type=int, default=50)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--emb_dropout', type=float, default=0.5)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--max_dec_len', type=int, default=50)
parser.add_argument('--beam_size', type=int, default=1)
parser.add_argument('--attn_type', default='soft', choices=['soft', 'mlp', 'linear', 'deep'], help='Attention type')
parser.add_argument('--pos_dim', type=int, default=50)
parser.add_argument('--pos_dropout', type=float, default=0.5)
parser.add_argument('--no_edit', dest='edit', action='store_false', help='Do not use edit classifier in lemmatization. By default use an edit classifier.')
parser.add_argument('--num_edit', type=int, default=len(edit.EDIT_TO_ID))
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--no_pos', dest='pos', action='store_false', help='Do not use UPOS in lemmatization. By default UPOS is used.')
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--decay_epoch', type=int, default=30, help="Decay the lr starting from this epoch.")
parser.add_argument('--num_epoch', type=int, default=60)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--model_dir', type=str, default='saved_models/lemma', help='Root dir for saving models.')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args(args=args)
return args
def main(args=None):
args = parse_args(args=args)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
logger.info("Running lemmatizer in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
# load data
logger.info("[Loading data with batch size {}...]".format(args['batch_size']))
train_doc = Document(CoNLL.conll2dict(input_file=args['train_file']))
train_batch = DataLoader(train_doc, args['batch_size'], args, evaluation=False)
vocab = train_batch.vocab
args['vocab_size'] = vocab['char'].size
args['pos_vocab_size'] = vocab['pos'].size
dev_doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
dev_batch = DataLoader(dev_doc, args['batch_size'], args, vocab=vocab, evaluation=True)
utils.ensure_dir(args['model_dir'])
model_file = '{}/{}_lemmatizer.pt'.format(args['model_dir'], args['lang'])
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
utils.print_config(args)
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
logger.warning("[Skip training because no training data available...]")
return
# start training
# train a dictionary-based lemmatizer
trainer = Trainer(args=args, vocab=vocab, use_cuda=args['cuda'])
logger.info("[Training dictionary-based lemmatizer...]")
trainer.train_dict(train_batch.doc.get([TEXT, UPOS, LEMMA]))
logger.info("Evaluating on dev set...")
dev_preds = trainer.predict_dict(dev_batch.doc.get([TEXT, UPOS]))
dev_batch.doc.set([LEMMA], dev_preds)
CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
_, _, dev_f = scorer.score(system_pred_file, gold_file)
logger.info("Dev F1 = {:.2f}".format(dev_f * 100))
if args.get('dict_only', False):
# save dictionaries
trainer.save(model_file)
else:
# train a seq2seq model
logger.info("[Training seq2seq-based lemmatizer...]")
global_step = 0
max_steps = len(train_batch) * args['num_epoch']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
# start training
for epoch in range(1, args['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
logger.info(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,
max_steps, epoch, args['num_epoch'], loss, duration, current_lr))
# eval on dev
logger.info("Evaluating on dev set...")
dev_preds = []
dev_edits = []
for i, batch in enumerate(dev_batch):
preds, edits = trainer.predict(batch, args['beam_size'])
dev_preds += preds
if edits is not None:
dev_edits += edits
dev_preds = trainer.postprocess(dev_batch.doc.get([TEXT]), dev_preds, edits=dev_edits)
# try ensembling with dict if necessary
if args.get('ensemble_dict', False):
logger.info("[Ensembling dict with seq2seq model...]")
dev_preds = trainer.ensemble(dev_batch.doc.get([TEXT, UPOS]), dev_preds)
dev_batch.doc.set([LEMMA], dev_preds)
CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / train_batch.num_examples * args['batch_size'] # avg loss per batch
logger.info("epoch {}: train_loss = {:.6f}, dev_score = {:.4f}".format(epoch, train_loss, dev_score))
# save best model
if epoch == 1 or dev_score > max(dev_score_history):
trainer.save(model_file)
logger.info("new best model saved.")
best_dev_preds = dev_preds
# lr schedule
if epoch > args['decay_epoch'] and dev_score <= dev_score_history[-1] and \
args['optim'] in ['sgd', 'adagrad']:
current_lr *= args['lr_decay']
trainer.update_lr(current_lr)
dev_score_history += [dev_score]
logger.info("")
logger.info("Training ended with {} epochs.".format(epoch))
best_f, best_epoch = max(dev_score_history)*100, np.argmax(dev_score_history)+1
logger.info("Best dev F1 = {:.2f}, at epoch = {}".format(best_f, best_epoch))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = '{}/{}_lemmatizer.pt'.format(args['model_dir'], args['lang'])
# load model
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand']:
loaded_args[k] = args[k]
# load data
logger.info("Loading data with batch size {}...".format(args['batch_size']))
doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
batch = DataLoader(doc, args['batch_size'], loaded_args, vocab=vocab, evaluation=True)
# skip eval if dev data does not exist
if len(batch) == 0:
logger.warning("Skip evaluation because no dev data is available...\nLemma score:\n{} ".format(args['lang']))
return
dict_preds = trainer.predict_dict(batch.doc.get([TEXT, UPOS]))
if loaded_args.get('dict_only', False):
preds = dict_preds
else:
logger.info("Running the seq2seq model...")
preds = []
edits = []
for i, b in enumerate(batch):
ps, es = trainer.predict(b, args['beam_size'])
preds += ps
if es is not None:
edits += es
preds = trainer.postprocess(batch.doc.get([TEXT]), preds, edits=edits)
if loaded_args.get('ensemble_dict', False):
logger.info("[Ensembling dict with seq2seq lemmatizer...]")
preds = trainer.ensemble(batch.doc.get([TEXT, UPOS]), preds)
# write to file and score
batch.doc.set([LEMMA], preds)
CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
logger.info("Finished evaluation\nLemma score:\n{} {:.2f}".format(args['lang'], score*100))
if __name__ == '__main__':
main()
| [
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.3.0 | rasimuvaikas/stanza | 21793519a531b0e9d7151e42d180d97785c9a5b8 |
1.2 | import torch
import numpy as np
import unittest
from his_evaluators.metrics import register_metrics
DEVICE = torch.device("cuda:0")
class MetricTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.paired_metric_dict = register_metrics(types=("ssim", "psnr", "lps"), device=DEVICE)
cls.unpaired_metric_dict = register_metrics(
types=("is", "fid", "PCB-CS-reid", "PCB-freid", "OS-CS-reid", "OS-freid"),
device=DEVICE
)
def test_01_paired_metrics(self):
bs = 5
image_size = 512
preds_imgs = np.random.rand(bs, 3, image_size, image_size)
preds_imgs *= 255
preds_imgs = preds_imgs.astype(np.uint8)
ref_imgs = np.copy(preds_imgs)
ssim_score = self.paired_metric_dict["ssim"].calculate_score(preds_imgs, ref_imgs)
psnr_score = self.paired_metric_dict["psnr"].calculate_score(preds_imgs, ref_imgs)
lps_score = self.paired_metric_dict["lps"].calculate_score(preds_imgs, ref_imgs)
print("ssim score = {}".format(ssim_score))
print("psnr score = {}".format(psnr_score))
print("lps score = {}".format(lps_score))
self.assertEqual(ssim_score, 1.0)
self.assertEqual(psnr_score, np.inf)
self.assertEqual(lps_score, 0.0)
def test_02_unpaired_metrics(self):
bs = 5
image_size = 512
preds_imgs = np.random.rand(bs, 3, image_size, image_size)
preds_imgs *= 255
preds_imgs = preds_imgs.astype(np.uint8)
ref_imgs = np.random.rand(bs, 3, image_size, image_size)
ref_imgs *= 255
ref_imgs = ref_imgs.astype(np.uint8)
inception_score = self.unpaired_metric_dict["is"].calculate_score(preds_imgs)
fid_score = self.unpaired_metric_dict["fid"].calculate_score(preds_imgs, ref_imgs)
os_cs_reid = self.unpaired_metric_dict["OS-CS-reid"].calculate_score(preds_imgs, ref_imgs)
pcb_cs_reid = self.unpaired_metric_dict["PCB-CS-reid"].calculate_score(preds_imgs, ref_imgs)
os_freid = self.unpaired_metric_dict["OS-freid"].calculate_score(preds_imgs, ref_imgs)
pcb_freid = self.unpaired_metric_dict["PCB-freid"].calculate_score(preds_imgs, ref_imgs)
print("inception score = {}".format(inception_score))
print("fid score = {}".format(fid_score))
print("OS-Cosine Similarity = {}".format(os_cs_reid))
print("PCB-Cosine Similarity = {}".format(pcb_cs_reid))
print("OS-freid = {}".format(os_freid))
print("PCB-freid = {}".format(pcb_freid))
if __name__ == '__main__':
unittest.main()
| [
"torch.device"
] | 1.2.0 | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 |
1.8 | import re
import pickle
import numpy as np
import pandas as pd
import torch
from string import punctuation
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from flask import Flask, render_template, request, jsonify
# import MLP module definition
from models import MLP
class CustomUnpickler(pickle.Unpickler):
def find_class(self, module, name):
try:
return super().find_class(__name__, name)
except AttributeError:
return super().find_class(module, name)
# load saved model parameters and vectorizers
model = CustomUnpickler(open('data/multi-layer-perceptron-parameters.pkl', 'rb')).load()
text_vectorizer = CustomUnpickler(open('data/text_vectorizer.pkl','rb')).load()
def preprocess(df):
"""
Preprocess user input in the same way we preprocessed the training data.
1. Remove non-alphabetic characters, convert to lowercase
2. Tokenize (word_tokenizer from nltk)
3. Lemmatize (WordNetLemmatizer)
4. Vectorize (CountVectorizer)
    Use the same CountVectorizer from training in order to extract
the same features and have the same output dimensions.
"""
lemmatizer = WordNetLemmatizer()
text_processed = []
for text in df.text:
# remove punctuation and lowercase
text = re.sub(r'[^a-zA-Z]', ' ', text)
text = text.lower()
# tokenize and lemmatize tokens
tokens = word_tokenize(text)
tokens = [lemmatizer.lemmatize(x) for x in tokens]
text_processed.append(' '.join(tokens))
# vectorize
text_matrix = text_vectorizer.transform(text_processed).toarray()
# return np matrix
return text_matrix
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict', methods=['POST'])
def predict():
text = request.json['text']
d = {'text': [text]}
# create dataframe from user input
X_df = pd.DataFrame(data=d)
# preprocess df and return np array
X_np = preprocess(X_df)
# convert to tensor
X_tensor = torch.Tensor(X_np)
# predict
y_pred = model(X_tensor)
y_pred_max = torch.max(y_pred,1)[1]
if y_pred_max == 1:
result = "real"
else:
result = "fake"
return jsonify({"result": result})
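def _demo_predict():
    # Hedged usage sketch (not part of the original file): exercise /predict through
    # Flask's built-in test client instead of a running server. The example headline
    # is made up, and importing this module still requires the pickled model,
    # vectorizer and NLTK data to be available.
    with app.test_client() as client:
        resp = client.post('/predict', json={'text': 'Scientists discover water on Mars'})
        print(resp.get_json())  # e.g. {'result': 'real'} or {'result': 'fake'}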
if __name__ == '__main__':
app.run() | [
"torch.Tensor",
"torch.max"
] | 1.8.0 | Aditibansal2603/fake-news-predictor | 9d4ba2ed95799ca63d0fa7f3f5ad0e6f09b9b215 |
1.6 | '''
This code is used for testing MoDL on JPEG-compressed data, for the results shown in figures 6, 7 and 8c in the paper.
Before running this script you should update the following:
basic_data_folder - it should be the same as the output folder defined in the script /crime_2_jpeg/data_prep/jpeg_data_prep.py
(c) Efrat Shimron, UC Berkeley, 2021
'''
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from MoDL_single import UnrolledModel
from subtle_data_crimes.functions.error_funcs import error_metrics
from utils import complex_utils as cplx
from utils.datasets import create_data_loaders
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# create a folder for the test figures
if not os.path.exists('test_figs'):
os.makedirs('test_figs')
##################### create test loader ###########################
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# Hyper parameters
params = Namespace()
params.batch_size = 1
# image dimensions
params.NX = 640
params.NY = 372
# calib is assumed to be 12 for NX=640
calib_x = int(12)
calib_y = int(12 * params.NY / params.NX)
params.calib = np.array([calib_x, calib_y])
params.shuffle_flag = False  # should be True for training, False for testing. Note that this is a boolean, not a string.
params.sampling_flag = 'var_dens_2D'
params.var_dens_flag = 'strong' # 'weak' / 'strong'
checkpoint_num = int(69) # load saved model (trained network)
q_vec = np.array([20, 50, 75, 999])
R_vec = np.array([4])
N_examples_4display = 15 # number of examples to display
N_examples_stats = 15 # number of examples over which the mean and STD will be computed
NRMSE_av_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
NRMSE_std_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
SSIM_av_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
SSIM_std_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
N_calc_err = 200
NRMSE_examples_4display = np.zeros((R_vec.shape[0], q_vec.shape[0], N_calc_err))
SSIM_examples_4display = np.zeros((R_vec.shape[0], q_vec.shape[0], N_calc_err))
small_dataset_flag = 0
for r in range(R_vec.shape[0]):
R = R_vec[r]
print('================================================== ')
print(' R={} '.format(R))
print('================================================== ')
# Important - here we update R in the params in order to create masks with appropriate sampling
# The mask is created in the DataTransform (utils/datasets
params.R = R
for qi in range(q_vec.shape[0]):
q = q_vec[qi]
params.q = q
# update the next path to YOUR path
basic_data_folder = "/mikQNAP/NYU_knee_data/multicoil_efrat/5_JPEG_compressed_data/"
data_type = 'test'
im_type_str = 'full_im' # training & validation is done on blocks (to accelerate training). Test is done on full-size images.
params.data_path = basic_data_folder + data_type + "/q" + str(params.q) + "/" + im_type_str + "/"
test_loader = create_data_loaders(params)
N_test_batches = len(test_loader.dataset)
print('N_test_batches =', N_test_batches)
checkpoint_file = 'R{}_q{}/checkpoints/model_{}.pt'.format(R, q, checkpoint_num)
checkpoint = torch.load(checkpoint_file, map_location=device)
# load the parameters of the trained network
params_loaded = checkpoint["params"]
single_MoDL = UnrolledModel(params_loaded).to(device)
single_MoDL.load_state_dict(checkpoint['model'])
single_MoDL.eval()
NRMSE_test_list = []
SSIM_test_list = []
cnt = 0
with torch.no_grad():
for iter, data in enumerate(test_loader):
if iter % 10 == 0:
print('loading test batch ', iter)
# input_batch, target_batch, mask_batch, target_no_JPEG_batch = data
input_batch, target_batch, mask_batch = data
# display the mask (before converting it to torch tensor)
if (iter == 0):
# print('mask_batch shape:',mask_batch.shape)
mask_squeezed = mask_batch[0, :, :, 0].squeeze()
# fig = plt.figure()
# plt.imshow(mask_squeezed, cmap="gray")
# plt.title(params.sampling_flag + ' epoch 0, iter {}'.format(iter))
# plt.show()
# fig.savefig('mask_iter{}.png'.format(iter))
# move data to GPU
input_batch = input_batch.to(device)
target_batch = target_batch.to(device)
mask_batch = mask_batch.to(device)
# forward pass - for the full batch
out_batch = single_MoDL(input_batch.float(), mask=mask_batch)
for i in range(params.batch_size):
cnt += 1 # counts the number of test images
print('cnt={}'.format(cnt))
im_input = cplx.to_numpy(input_batch.cpu())[i, :, :]
im_target = cplx.to_numpy(target_batch.cpu())[i, :, :]
im_out = cplx.to_numpy(out_batch.cpu())[i, :, :]
MoDL_err = error_metrics(np.abs(im_target), np.abs(im_out))
MoDL_err.calc_NRMSE()
MoDL_err.calc_SSIM()
NRMSE_test_list.append(MoDL_err.NRMSE)
SSIM_test_list.append(MoDL_err.SSIM)
if cnt < N_calc_err:
NRMSE_examples_4display[r, qi, cnt - 1] = MoDL_err.NRMSE
SSIM_examples_4display[r, qi, cnt - 1] = MoDL_err.SSIM
if cnt <= N_examples_4display:
target_im_rotated = np.rot90(np.abs(im_target), 2)
im_out_rotated = np.rot90(np.abs(im_out), 2)
NX = im_out_rotated.shape[0]
NY = im_out_rotated.shape[1]
if (r == 0) & (qi == 0) & (iter == 0):
TARGETS = np.zeros((NX, NY, q_vec.shape[0], N_examples_4display))
RECS = np.zeros((NX, NY, R_vec.shape[0], q_vec.shape[0], N_examples_4display))
TARGETS[:, :, qi, iter] = target_im_rotated
RECS[:, :, r, qi, iter] = im_out_rotated
# if iter==0:
fig = plt.figure()
plt.imshow(target_im_rotated, cmap="gray")
plt.colorbar(shrink=0.5)
plt.axis('off')
plt.title('target - iter={} - R{} q{}'.format(iter, R, q))
plt.show()
figname = 'check3_target_R{}_q{}_iter{}'.format(R, q, iter)
fig.savefig(figname)
if iter >= N_examples_stats:
break
# NRMSE - calc av & std
NRMSE_test_array = np.asarray(NRMSE_test_list)
NRMSE_av = np.mean(NRMSE_test_array[0:N_examples_stats].squeeze())
NRMSE_std = np.std(NRMSE_test_array[0:N_examples_stats].squeeze())
NRMSE_av_vs_q_and_R[r, qi] = NRMSE_av
NRMSE_std_vs_q_and_R[r, qi] = NRMSE_std
# SSIM - calc av & std
SSIM_test_array = np.asarray(SSIM_test_list)
SSIM_av = np.mean(SSIM_test_array[0:N_examples_stats].squeeze())
SSIM_std = np.std(SSIM_test_array[0:N_examples_stats].squeeze())
SSIM_av_vs_q_and_R[r, qi] = SSIM_av
SSIM_std_vs_q_and_R[r, qi] = SSIM_std
print('q={} NRMSE_av = {}, SSIM_av = {}'.format(q, NRMSE_av, SSIM_av))
# save NRMSE_av & SSIM
print('saving results')
results_filename = 'Res_for_Fig6.npz'
np.savez(results_filename, R_vec=R_vec, q_vec=q_vec, params=params, checkpoint_num=checkpoint_num,
NRMSE_av_vs_q_and_R=NRMSE_av_vs_q_and_R,
NRMSE_std_vs_q_and_R=NRMSE_std_vs_q_and_R,
SSIM_av_vs_q_and_R=SSIM_av_vs_q_and_R,
SSIM_std_vs_q_and_R=SSIM_std_vs_q_and_R,
NRMSE_examples_4display=NRMSE_examples_4display,
SSIM_examples_4display=SSIM_examples_4display,
N_examples_stats=N_examples_stats,
N_examples_4display=N_examples_4display,
TARGETS=TARGETS,
RECS=RECS,
)
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.6.0 | mikgroup/subtle_data_crimes | 210025d9cb8f92583f5f983be15af06b57cfea36 |
1.0 | import os
import random
import time
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from crossView import PVA_model, Argoverse
from opt import get_args
import tqdm
from datetime import datetime
from utils import mean_IU, mean_precision
import wandb
def readlines(filename):
"""Read all the lines in a text file and return as a list
"""
with open(filename, 'r') as f:
lines = f.read().splitlines()
return lines
class Trainer_argo:
def __init__(self):
self.opt = get_args()
self.models = {}
self.weight = {"static": self.opt.static_weight, "dynamic": self.opt.dynamic_weight}
self.seed = self.opt.global_seed
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.create_time = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
self.epoch = 0
self.start_epoch = 0
if self.seed != 0:
self.set_seed() # set seed
# Initializing models
self.model = PVA_model(self.opt, self.device)
#self.model.to(self.device)
# Optimization
self.optimizer = optim.Adam(self.model.parameters_to_train)
# Data Loaders
fpath = os.path.join(
os.path.dirname(__file__),
"splits",
"argo",
"{}_files.txt")
train_filenames = readlines(fpath.format("train"))
val_filenames = readlines(fpath.format("val"))
self.val_filenames = val_filenames
self.train_filenames = train_filenames
train_dataset = Argoverse(self.opt, train_filenames)
val_dataset = Argoverse(self.opt, val_filenames, is_train=False)
self.train_loader = DataLoader(
dataset = train_dataset,
batch_size = self.opt.batch_size,
shuffle = True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
self.val_loader = DataLoader(
dataset = val_dataset,
batch_size = 1,
shuffle = True,
num_workers=self.opt.num_workers,
pin_memory=True,
drop_last=True)
if self.opt.load_weights_folder != "":
self.load_model()
# Save log and models path
now = datetime.now()
self.opt.save_path = os.path.join(self.opt.save_path, now.strftime("%Y%m%d-%H%M%S"))
wandb.init(project="cross-view", entity="zzx9636", config={"epochs": self.opt.num_epochs,
"batch_size": self.opt.batch_size})
wandb.define_metric("eval/*", step_metric="eval/step")
print(
"There are {:d} training items and {:d} validation items\n".format(
len(train_dataset),
len(val_dataset)))
def train(self):
#self.validation()
for self.epoch in range(self.start_epoch, self.opt.num_epochs + 1):
self.adjust_learning_rate(self.optimizer, self.epoch, self.opt.lr_steps)
self.run_epoch()
self.validation()
if (self.epoch%5)==0:
self.save_model()
def run_epoch(self):
for inputs in self.train_loader:
self.model.train()
self.optimizer.zero_grad()
for key, input in inputs.items():
if key != "filename":
inputs[key] = input.to(self.device)
_, losses = self.model(inputs)
losses["loss"].backward()
self.optimizer.step()
wandb.log({"loss": losses["loss"], "topview_loss": losses["topview_loss"],
"transform_loss": losses["transform_loss"]})
#"transform_topview_loss": losses["transform_topview_loss"]})
def validation(self):
iou, mAP = np.array([0., 0., 0.]), np.array([0., 0., 0.])
#trans_iou, trans_mAP = np.array([0., 0.]), np.array([0., 0.])
with torch.no_grad():
for inputs in self.val_loader:
self.model.eval()
for key, input in inputs.items():
if key != "filename":
inputs[key] = input.to(self.device)
outputs, _ = self.model(inputs)
pred = np.squeeze(
torch.argmax(
outputs["topview"].detach(),
1).cpu().numpy())
true = np.squeeze(
inputs["combine"].detach().cpu().numpy())
#print(mean_IU(pred, true), mean_precision(pred, true))
iou += mean_IU(pred, true)
mAP += mean_precision(pred, true)
iou /= len(self.val_loader)
mAP /= len(self.val_loader)
print("Epoch: %d | Validation: mIOU: %.4f, %.4f mAP: %.4f, %.4f" % (self.epoch, iou[1], iou[2], mAP[1], mAP[2]))
log_dict = {"eval/step": self.epoch, "eval/map/mIOU": iou[1], "eval/map/mAP": mAP[1],
"eval/vehicle/mIOU": iou[2], "eval/vehicle/mAP": mAP[2]}
wandb.log(log_dict)
def save_model(self):
save_path = os.path.join(
self.opt.save_path,
"weights_{}".format(
self.epoch)
)
if not os.path.exists(save_path):
os.makedirs(save_path)
for model_name, model in self.model.models.items():
model_path = os.path.join(save_path, "{}.pth".format(model_name))
state_dict = model.state_dict()
state_dict['epoch'] = self.epoch
if model_name == "encoder":
state_dict["height"] = self.opt.height
state_dict["width"] = self.opt.width
torch.save(state_dict, model_path)
optim_path = os.path.join(save_path, "{}.pth".format("adam"))
torch.save(self.optimizer.state_dict(), optim_path)
print("Save models to ", save_path)
def load_model(self):
"""Load model(s) from disk
"""
self.opt.load_weights_folder = os.path.expanduser(
self.opt.load_weights_folder)
assert os.path.isdir(self.opt.load_weights_folder), \
"Cannot find folder {}".format(self.opt.load_weights_folder)
print(
"loading model from folder {}".format(
self.opt.load_weights_folder))
for key in self.model.models.keys():
if "discriminator" not in key:
print("Loading {} weights...".format(key))
path = os.path.join(
self.opt.load_weights_folder,
"{}.pth".format(key))
model_dict = self.model.models[key].state_dict()
pretrained_dict = torch.load(path)
if 'epoch' in pretrained_dict:
self.start_epoch = pretrained_dict['epoch']
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.model.models[key].load_state_dict(model_dict)
# loading adam state
if self.opt.load_weights_folder == "":
optimizer_load_path = os.path.join(
self.opt.load_weights_folder, "adam.pth")
if os.path.isfile(optimizer_load_path):
print("Loading Adam weights")
optimizer_dict = torch.load(optimizer_load_path)
self.optimizer.load_state_dict(optimizer_dict)
else:
print("Cannot find Adam weights so Adam is randomly initialized")
def adjust_learning_rate(self, optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by 10 every 25 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
decay = round(decay, 2)
lr = self.opt.lr * decay
lr_transform = self.opt.lr_transform * decay
decay = self.opt.weight_decay
optimizer.param_groups[0]['lr'] = lr_transform
optimizer.param_groups[1]['lr'] = lr
optimizer.param_groups[0]['weight_decay'] = decay
optimizer.param_groups[1]['weight_decay'] = decay
wandb.log({"lr": lr, "lr_transform":lr_transform, "decay": decay})
def set_seed(self):
seed = self.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
start_time = time.ctime()
print(start_time)
trainer = Trainer_argo()
trainer.train()
end_time = time.ctime()
print(end_time)
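# Hedged illustration (not part of the original file): the milestone-based decay rule
# used in Trainer_argo.adjust_learning_rate, shown standalone. The base learning rate
# and milestone epochs below are made-up example values.
def _demo_lr_schedule(base_lr=1e-4, lr_steps=(25, 40), num_epochs=50):
    for epoch in range(num_epochs):
        decay = round(0.1 ** (sum(epoch >= np.array(lr_steps))), 2)
        print("epoch {:2d}: lr = {:.2e}".format(epoch, base_lr * decay))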
| [
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"torch.no_grad",
"torch.optim.Adam",
"torch.save",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.0.0 | zzx9636/cross-view | 9a7e874be607eefa7bd34934e274cc376e99f65f |
1.2 | import datetime
import logging
import math
import os
import time
import traceback
from typing import Dict, Optional, Tuple, Union, Iterable, Any
import torch
import torch.distributed as dist
import torch.optim.lr_scheduler
from torch.nn.parallel import DistributedDataParallel
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError, parse_cuda_device, check_for_gpu
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import dump_metrics, gpu_memory_mb, peak_memory_mb, lazy_groups_of
from allennlp.data.instance import Instance
from allennlp.data.iterators.data_iterator import DataIterator, TensorDict
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
from allennlp.training import util as training_util
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.optimizers import Optimizer
from allennlp.training.tensorboard_writer import TensorboardWriter
from allennlp.training.trainer_base import TrainerBase
logger = logging.getLogger(__name__)
@TrainerBase.register("default")
class Trainer(TrainerBase):
def __init__(
self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds: int = None,
checkpointer: Checkpointer = None,
model_save_interval: float = None,
cuda_device: int = -1,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
summary_interval: int = 100,
histogram_interval: int = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
log_batch_size_period: Optional[int] = None,
moving_average: Optional[MovingAverage] = None,
distributed: bool = False,
rank: int = 0,
world_size: int = 1,
num_gradient_accumulation_steps: int = 1,
) -> None:
"""
A trainer for doing supervised learning. It just takes a labeled dataset
and a ``DataIterator``, and uses the supplied ``Optimizer`` to learn the weights
for your model over some fixed number of epochs. You can also pass in a validation
dataset and enable early stopping. There are many other bells and whistles as well.
# Parameters
model : ``Model``, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their ``forward`` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you use `Trainer.from_params` this will be
handled for you.)
        optimizer : ``torch.optim.Optimizer``, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
iterator : ``DataIterator``, required.
A method for iterating over a ``Dataset``, yielding padded indexed batches.
train_dataset : ``Dataset``, required.
A ``Dataset`` to train on. The dataset should have already been indexed.
validation_dataset : ``Dataset``, optional, (default = None).
A ``Dataset`` to evaluate on. The dataset should have already been indexed.
patience : Optional[int] > 0, optional (default=None)
Number of epochs to be patient before early stopping: the training is stopped
after ``patience`` epochs with no improvement. If given, it must be ``> 0``.
If None, early stopping is disabled.
        validation_metric : str, optional (default="-loss")
Validation metric to measure for whether to stop training using patience
and whether to serialize an ``is_best`` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_iterator : ``DataIterator``, optional (default=None)
An iterator to use for the validation set. If ``None``, then
use the training `iterator`.
shuffle : ``bool``, optional (default=True)
Whether to shuffle the instances in the iterator or not.
num_epochs : int, optional (default = 20)
Number of training epochs.
serialization_dir : str, optional (default=None)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
num_serialized_models_to_keep : ``int``, optional (default=20)
Number of previous model checkpoints to retain. Default is to keep 20 checkpoints.
A value of None or -1 means all checkpoints will be kept.
keep_serialized_model_every_num_seconds : ``int``, optional (default=None)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
checkpointer : ``Checkpointer``, optional (default=None)
An instance of class Checkpointer to use instead of the default. If a checkpointer is specified,
the arguments num_serialized_models_to_keep and keep_serialized_model_every_num_seconds should
not be specified. The caller is responsible for initializing the checkpointer so that it is
consistent with serialization_dir.
model_save_interval : ``float``, optional (default=None)
If provided, then serialize models every ``model_save_interval``
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if ``serialization_dir`` is provided.
cuda_device : ``int``, optional (default = -1)
An integer specifying the CUDA device(s) to use for this process. If -1, the CPU is used.
Data parallelism is controlled at the allennlp train level, so each trainer will have a single
GPU.
grad_norm : ``float``, optional, (default = None).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : ``float``, optional (default = ``None``).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting ``NaNs`` in your gradients during training
that are not solved by using ``grad_norm``, you may need this.
learning_rate_scheduler : ``LearningRateScheduler``, optional (default = None)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the ``step_batch`` method). If you use :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the ``validation_metric`` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
``step_batch(batch_num_total)`` which updates the learning rate given the batch number.
momentum_scheduler : ``MomentumScheduler``, optional (default = None)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
summary_interval : ``int``, optional, (default = 100)
Number of batches between logging scalars to tensorboard
histogram_interval : ``int``, optional, (default = ``None``)
If not None, then log histograms to tensorboard every ``histogram_interval`` batches.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
``model.get_parameters_for_histogram_tensorboard_logging``.
The layer activations are logged for any modules in the ``Model`` that have
the attribute ``should_log_activations`` set to ``True``. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
should_log_parameter_statistics : ``bool``, optional, (default = True)
Whether to send parameter statistics (mean and standard deviation
of parameters and gradients) to tensorboard.
should_log_learning_rate : ``bool``, optional, (default = False)
Whether to send parameter specific learning rate to tensorboard.
log_batch_size_period : ``int``, optional, (default = ``None``)
If defined, how often to log the average batch size.
moving_average : ``MovingAverage``, optional, (default = None)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
evaluation, we back up the original parameters and assign the moving averages to the corresponding
parameters. Note that when saving the checkpoint, we save the moving averages of the
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart training from a checkpoint.
distributed : ``bool``, optional, (default = False)
If set, PyTorch's `DistributedDataParallel` is used to train the model on multiple GPUs. This also
requires `world_size` to be greater than 1.
rank : ``int``, optional, (default = 0)
This is the unique identifier of the `Trainer` in a distributed process group. The GPU device id is
used as the rank.
world_size : ``int``, (default = 1)
The number of `Trainer` workers participating in the distributed training.
num_gradient_accumulation_steps : ``int``, optional, (default = 1)
Gradients are accumulated for the given number of steps before doing an optimizer step. This can
be useful to accommodate batches that are too large to fit in memory. Refer to Thomas Wolf's
[post](https://tinyurl.com/y5mv44fw) for details on gradient accumulation.
"""
super().__init__(serialization_dir, cuda_device, distributed, rank, world_size)
# I am not calling move_to_gpu here, because if the model is
# not already on the GPU then the optimizer is going to be wrong.
self.model = model
self.iterator = iterator
self._validation_iterator = validation_iterator
self.shuffle = shuffle
self.optimizer = optimizer
self.train_data = train_dataset
self._validation_data = validation_dataset
if patience is None: # no early stopping
if validation_dataset:
logger.warning(
"You provided a validation dataset but patience was set to None, "
"meaning that early stopping is disabled"
)
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError(
'{} is an invalid value for "patience": it must be a positive integer '
"or None (if you want to disable early stopping)".format(patience)
)
# For tracking is_best_so_far and should_stop_early
self._metric_tracker = MetricTracker(patience, validation_metric)
# Get rid of + or -
self._validation_metric = validation_metric[1:]
self._num_epochs = num_epochs
if checkpointer is not None:
# We can't easily check if these parameters were passed in, so check against their default values.
# We don't check against serialization_dir since it is also used by the parent class.
if (
num_serialized_models_to_keep != 20
or keep_serialized_model_every_num_seconds is not None
):
raise ConfigurationError(
"When passing a custom Checkpointer, you may not also pass in separate checkpointer "
"args 'num_serialized_models_to_keep' or 'keep_serialized_model_every_num_seconds'."
)
self._checkpointer = checkpointer
else:
self._checkpointer = Checkpointer(
serialization_dir,
keep_serialized_model_every_num_seconds,
num_serialized_models_to_keep,
)
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
self._momentum_scheduler = momentum_scheduler
self._moving_average = moving_average
# We keep the total batch number as an instance variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._tensorboard = TensorboardWriter(
get_batch_num_total=lambda: self._batch_num_total,
serialization_dir=serialization_dir,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
)
self._log_batch_size_period = log_batch_size_period
self._last_log = 0.0 # time of last logging
self._num_gradient_accumulation_steps = num_gradient_accumulation_steps
# Enable activation logging.
if histogram_interval is not None:
self._tensorboard.enable_activation_logging(self.model)
# Using `DistributedDataParallel`(ddp) brings in a quirk wrt AllenNLP's `Model` interface and its
# usage. A `Model` object is wrapped by `ddp`, but assigning the wrapped model to `self.model`
# will break the usages such as `Model.get_regularization_penalty`, `Model.get_metrics`, etc.
#
# Hence a reference to Pytorch's object is maintained in the case of distributed training and in the
# normal case, reference to `Model` is retained. This reference is only used in
# these places: `model.__call__`, `model.train` and `model.eval`.
if self._distributed:
self._pytorch_model = DistributedDataParallel(
self.model, device_ids=[self.cuda_device], find_unused_parameters=True
)
else:
self._pytorch_model = self.model
def rescale_gradients(self) -> Optional[float]:
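# Delegates to training_util.rescale_gradients, which clips the total gradient norm to
# self._grad_norm (when set) and returns that norm, or None when rescaling is disabled.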
return training_util.rescale_gradients(self.model, self._grad_norm)
def batch_loss(self, batch: TensorDict, for_training: bool) -> torch.Tensor:
"""
Does a forward pass on the given batches and returns the ``loss`` value in the result.
If ``for_training`` is `True` also applies regularization penalty.
"""
batch = nn_util.move_to_device(batch, self.cuda_device)
output_dict = self._pytorch_model(**batch)
try:
loss = output_dict["loss"]
if for_training:
loss += self.model.get_regularization_penalty()
except KeyError:
if for_training:
raise RuntimeError(
"The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs)."
)
loss = None
return loss
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
peak_cpu_usage = peak_memory_mb()
logger.info(f"Peak CPU memory usage MB: {peak_cpu_usage}")
gpu_usage = []
for gpu, memory in gpu_memory_mb().items():
gpu_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage MB: {memory}")
train_loss = 0.0
# Set the model to "train" mode.
self._pytorch_model.train()
# Get tqdm for the training batches
batch_generator = self.iterator(self.train_data, num_epochs=1, shuffle=self.shuffle)
batch_group_generator = lazy_groups_of(
batch_generator, self._num_gradient_accumulation_steps
)
num_training_batches = math.ceil(
self.iterator.get_num_batches(self.train_data) / self._num_gradient_accumulation_steps
)
# Having multiple tqdm bars in case of distributed training will be a mess. Hence only the master's
# progress is shown
if self._master:
batch_group_generator_tqdm = Tqdm.tqdm(
batch_group_generator, total=num_training_batches
)
else:
batch_group_generator_tqdm = batch_group_generator
self._last_log = time.time()
last_save_time = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())
logger.info("Training")
cumulative_batch_group_size = 0
for batch_group in batch_group_generator_tqdm:
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
self.optimizer.zero_grad()
for batch in batch_group:
loss = self.batch_loss(batch, for_training=True)
if torch.isnan(loss):
raise ValueError("nan loss encountered")
loss = loss / len(batch_group)
loss.backward()
train_loss += loss.item()
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
if self._tensorboard.should_log_histograms_this_batch() and self._master:
# get the magnitude of parameter updates for logging
# We need a copy of current parameters to compute magnitude of updates,
# and copy them to CPU so large models won't go OOM on the GPU.
param_updates = {
name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()
}
self.optimizer.step()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
update_norm = torch.norm(param_updates[name].view(-1))
param_norm = torch.norm(param.view(-1)).cpu()
self._tensorboard.add_train_scalar(
"gradient_update/" + name, update_norm / (param_norm + 1e-7)
)
else:
self.optimizer.step()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(
self.model,
train_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=[self.cuda_device],
)
# Updating tqdm only for the master as the trainers wouldn't have one
if self._master:
description = training_util.description_from_metrics(metrics)
batch_group_generator_tqdm.set_description(description, refresh=False)
# Log parameter values to Tensorboard (only from the master)
if self._tensorboard.should_log_this_batch() and self._master:
self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)
self._tensorboard.log_learning_rates(self.model, self.optimizer)
self._tensorboard.add_train_scalar("loss/loss_train", metrics["loss"])
self._tensorboard.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self._tensorboard.should_log_histograms_this_batch() and self._master:
self._tensorboard.log_histograms(self.model, histogram_parameters)
if self._log_batch_size_period:
batch_group_size = sum(training_util.get_batch_size(batch) for batch in batch_group)
cumulative_batch_group_size += batch_group_size
if (batches_this_epoch - 1) % self._log_batch_size_period == 0:
average = cumulative_batch_group_size / batches_this_epoch
logger.info(
f"current batch size: {batch_group_size} mean batch size: {average}"
)
self._tensorboard.add_train_scalar("current_batch_size", batch_group_size)
self._tensorboard.add_train_scalar("mean_batch_size", average)
# Save model if needed.
if (
self._model_save_interval is not None
and (time.time() - last_save_time > self._model_save_interval)
and self._master
):
last_save_time = time.time()
self._save_checkpoint(
"{0}.{1}".format(epoch, training_util.time_to_str(int(last_save_time)))
)
metrics = training_util.get_metrics(
self.model,
train_loss,
batches_this_epoch,
reset=True,
world_size=self._world_size,
cuda_device=[self.cuda_device],
)
metrics["cpu_memory_MB"] = peak_cpu_usage
for (gpu_num, memory) in gpu_usage:
metrics["gpu_" + str(gpu_num) + "_memory_MB"] = memory
return metrics
def _validation_loss(self) -> Tuple[float, int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._pytorch_model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self.iterator
val_generator = val_iterator(self._validation_data, num_epochs=1, shuffle=False)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator, total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self.batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_loss += loss.detach().cpu().numpy()
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(
self.model,
val_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=[self.cuda_device],
)
description = training_util.description_from_metrics(val_metrics)
val_generator_tqdm.set_description(description, refresh=False)
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
epoch_counter = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError(
"Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?"
)
training_util.enable_gradient_clipping(self.model, self._grad_clipping)
logger.info("Beginning training.")
train_metrics: Dict[str, float] = {}
val_metrics: Dict[str, float] = {}
this_epoch_val_metric: Optional[float] = None
metrics: Dict[str, Any] = {}
epochs_trained = 0
training_start_time = time.time()
metrics["best_epoch"] = self._metric_tracker.best_epoch
for key, value in self._metric_tracker.best_epoch_metrics.items():
metrics["best_validation_" + key] = value
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
# get peak of memory usage
if "cpu_memory_MB" in train_metrics:
metrics["peak_cpu_memory_MB"] = max(
metrics.get("peak_cpu_memory_MB", 0), train_metrics["cpu_memory_MB"]
)
for key, value in train_metrics.items():
if key.startswith("gpu_"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
# Let all workers finish training before going into the validation mode
if self._distributed:
dist.barrier()
if self._validation_data is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, num_batches = self._validation_loss()
# It is safe again to wait till the validation is done. This is
# important to get the metrics right.
if self._distributed:
dist.barrier()
val_metrics = training_util.get_metrics(
self.model,
val_loss,
num_batches,
reset=True,
world_size=self._world_size,
cuda_device=[self.cuda_device],
)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
break
if self._master:
self._tensorboard.log_metrics(
train_metrics, val_metrics=val_metrics, log_to_console=True, epoch=epoch + 1
) # +1 because tensorboard doesn't like 0
# Create overall metrics dict
training_elapsed_time = time.time() - training_start_time
metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
metrics["training_start_epoch"] = epoch_counter
metrics["training_epochs"] = epochs_trained
metrics["epoch"] = epoch
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if self._metric_tracker.is_best_so_far():
# Update all the best_ metrics.
# (Otherwise they just stay the same as they were.)
metrics["best_epoch"] = epoch
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
if self._serialization_dir and self._master:
dump_metrics(
os.path.join(self._serialization_dir, f"metrics_epoch_{epoch}.json"), metrics
)
# The Scheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
if self._momentum_scheduler:
self._momentum_scheduler.step(this_epoch_val_metric, epoch)
if self._master:
self._save_checkpoint(epoch)
# Wait for the master to finish saving the checkpoint
if self._distributed:
dist.barrier()
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * (
(self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1
)
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
# make sure pending events are flushed to disk and files are closed properly
self._tensorboard.close()
# Load the best model state before returning
best_model_state = self._checkpointer.best_model_state()
if best_model_state:
self.model.load_state_dict(best_model_state)
return metrics
def _save_checkpoint(self, epoch: Union[int, str]) -> None:
"""
Saves a checkpoint of the model to self._serialization_dir.
Is a no-op if self._serialization_dir is None.
# Parameters
epoch : Union[int, str], required.
The epoch of training. If the checkpoint is saved in the middle
of an epoch, the parameter is a string with the epoch and timestamp.
"""
# If moving averages are used for parameters, we save
# the moving average values into checkpoint, instead of the current values.
if self._moving_average is not None:
self._moving_average.assign_average_value()
# These are the training states we need to persist.
training_states = {
"metric_tracker": self._metric_tracker.state_dict(),
"optimizer": self.optimizer.state_dict(),
"batch_num_total": self._batch_num_total,
}
# If we have a learning rate or momentum scheduler, we should persist them too.
if self._learning_rate_scheduler is not None:
training_states["learning_rate_scheduler"] = self._learning_rate_scheduler.state_dict()
if self._momentum_scheduler is not None:
training_states["momentum_scheduler"] = self._momentum_scheduler.state_dict()
self._checkpointer.save_checkpoint(
model_state=self.model.state_dict(),
epoch=epoch,
training_states=training_states,
is_best_so_far=self._metric_tracker.is_best_so_far(),
)
# Restore the original values for parameters so that training will not be affected.
if self._moving_average is not None:
self._moving_average.restore()
def _restore_checkpoint(self) -> int:
"""
Restores the model and training state from the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
``model.load_state_dict(torch.load("/path/to/model/weights.th"))``
If ``self._serialization_dir`` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
# Returns
epoch: int
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
model_state, training_state = self._checkpointer.restore_checkpoint()
if not training_state:
# No checkpoint to restore, start at 0
return 0
self.model.load_state_dict(model_state)
self.optimizer.load_state_dict(training_state["optimizer"])
if (
self._learning_rate_scheduler is not None
and "learning_rate_scheduler" in training_state
):
self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
if self._momentum_scheduler is not None and "momentum_scheduler" in training_state:
self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
training_util.move_optimizer_to_cuda(self.optimizer)
# Currently the ``training_state`` contains a serialized ``MetricTracker``.
if "metric_tracker" in training_state:
self._metric_tracker.load_state_dict(training_state["metric_tracker"])
# It used to be the case that we tracked ``val_metric_per_epoch``.
elif "val_metric_per_epoch" in training_state:
self._metric_tracker.clear()
self._metric_tracker.add_metrics(training_state["val_metric_per_epoch"])
# And before that we didn't track anything.
else:
self._metric_tracker.clear()
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split(".")[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get("batch_num_total")
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return
# Requires custom from_params.
@classmethod
def from_params( # type: ignore
cls,
model: Model,
serialization_dir: str,
iterator: DataIterator,
train_data: Iterable[Instance],
validation_data: Optional[Iterable[Instance]],
params: Params,
validation_iterator: DataIterator = None,
local_rank: int = 0,
) -> "Trainer":
patience = params.pop_int("patience", None)
validation_metric = params.pop("validation_metric", "-loss")
shuffle = params.pop_bool("shuffle", True)
num_epochs = params.pop_int("num_epochs", 20)
cuda_device = parse_cuda_device(params.pop("cuda_device", -1))
grad_norm = params.pop_float("grad_norm", None)
grad_clipping = params.pop_float("grad_clipping", None)
lr_scheduler_params = params.pop("learning_rate_scheduler", None)
momentum_scheduler_params = params.pop("momentum_scheduler", None)
check_for_gpu(cuda_device)
if cuda_device >= 0:
# Moving model to GPU here so that the optimizer state gets constructed on
# the right device.
model = model.cuda(cuda_device)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
if "moving_average" in params:
moving_average = MovingAverage.from_params(
params.pop("moving_average"), parameters=parameters
)
else:
moving_average = None
if lr_scheduler_params:
lr_scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
else:
lr_scheduler = None
if momentum_scheduler_params:
momentum_scheduler = MomentumScheduler.from_params(optimizer, momentum_scheduler_params)
else:
momentum_scheduler = None
if "checkpointer" in params:
if (
"keep_serialized_model_every_num_seconds" in params
or "num_serialized_models_to_keep" in params
):
raise ConfigurationError(
"Checkpointer may be initialized either from the 'checkpointer' key or from the "
"keys 'num_serialized_models_to_keep' and 'keep_serialized_model_every_num_seconds'"
" but the passed config uses both methods."
)
checkpointer = Checkpointer.from_params(params.pop("checkpointer"))
else:
num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
keep_serialized_model_every_num_seconds = params.pop_int(
"keep_serialized_model_every_num_seconds", None
)
checkpointer = Checkpointer(
serialization_dir=serialization_dir,
num_serialized_models_to_keep=num_serialized_models_to_keep,
keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
)
model_save_interval = params.pop_float("model_save_interval", None)
summary_interval = params.pop_int("summary_interval", 100)
histogram_interval = params.pop_int("histogram_interval", None)
should_log_parameter_statistics = params.pop_bool("should_log_parameter_statistics", True)
should_log_learning_rate = params.pop_bool("should_log_learning_rate", False)
log_batch_size_period = params.pop_int("log_batch_size_period", None)
distributed = params.pop_bool("distributed", False)
world_size = params.pop_int("world_size", 1)
num_gradient_accumulation_steps = params.pop("num_gradient_accumulation_steps", 1)
params.assert_empty(cls.__name__)
return cls(
model,
optimizer,
iterator,
train_data,
validation_data,
patience=patience,
validation_metric=validation_metric,
validation_iterator=validation_iterator,
shuffle=shuffle,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=lr_scheduler,
momentum_scheduler=momentum_scheduler,
checkpointer=checkpointer,
model_save_interval=model_save_interval,
summary_interval=summary_interval,
histogram_interval=histogram_interval,
should_log_parameter_statistics=should_log_parameter_statistics,
should_log_learning_rate=should_log_learning_rate,
log_batch_size_period=log_batch_size_period,
moving_average=moving_average,
distributed=distributed,
rank=local_rank,
world_size=world_size,
num_gradient_accumulation_steps=num_gradient_accumulation_steps,
)
| [
"torch.no_grad",
"torch.distributed.barrier",
"torch.nn.parallel.DistributedDataParallel",
"torch.isnan"
] | 1.2.0 | loopylangur/allennlp | 0fc695b08a0376317e45ae0a45584aa9eb14beb6 |
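The ``num_gradient_accumulation_steps`` logic in ``_train_epoch`` above divides each sub-batch loss by the group size so that the accumulated gradients match those of a single large batch, with one optimizer step per group. A minimal, self-contained sketch of that pattern follows; ``train_one_group``, ``loss_fn`` and the ``max_norm`` value are illustrative placeholders, not part of the trainer above.
import torch
# Sketch: accumulate gradients over a group of batches, then step once.
def train_one_group(model, optimizer, loss_fn, batch_group):
    optimizer.zero_grad()
    total = 0.0
    for inputs, targets in batch_group:
        loss = loss_fn(model(inputs), targets) / len(batch_group)  # scale so the sum matches one big batch
        loss.backward()  # gradients accumulate across the group
        total += loss.item()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)  # analogous to the grad_norm option
    optimizer.step()  # one update for the whole group
    return total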
1.2 | from typing import Dict, List, TypeVar, Generic
import warnings
import torch
import numpy
from allennlp.common import Registrable
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray)
class TokenIndexer(Generic[TokenType], Registrable):
"""
A ``TokenIndexer`` determines how string tokens get represented as arrays of indices in a model.
This class both converts strings into numerical values, with the help of a
:class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays.
Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number
34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]),
or in some other way that you can come up with (e.g., if you have some structured input you
want to represent in a special way in your data arrays, you can do that here).
# Parameters
token_min_padding_length : ``int``, optional (default=``0``)
The minimum padding length required for the :class:`TokenIndexer`. For example,
the minimum padding length of :class:`SingleIdTokenIndexer` is the largest size of
filter when using :class:`CnnEncoder`.
Note that if you set this for one TokenIndexer, you likely have to set it for all
:class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes.
"""
default_implementation = "single_id"
has_warned_for_as_padded_tensor = False
def __init__(self, token_min_padding_length: int = 0) -> None:
self._token_min_padding_length: int = token_min_padding_length
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
"""
The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training
data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,
token). This method takes a token and a dictionary of counts and increments counts for
whatever vocabulary items are present in the token. If this is a single token ID
representation, the vocabulary item is likely the token itself. If this is a token
characters representation, the vocabulary items are all of the characters in the token.
"""
raise NotImplementedError
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
) -> Dict[str, List[TokenType]]:
"""
Takes a list of tokens and converts them to one or more sets of indices.
This could be just an ID for each token from the vocabulary.
Or it could split each token into characters and return one ID per character.
Or (for instance, in the case of byte-pair encoding) there might not be a clean
mapping from individual tokens to indices.
"""
raise NotImplementedError
def get_padding_token(self) -> TokenType:
"""
Deprecated. Please just implement the padding token in `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve
backward compatibility; otherwise it would be abstract.
When we need to add padding tokens, what should they look like? This method returns a
"blank" token of whatever type is returned by :func:`tokens_to_indices`.
"""
warnings.warn(
"Using a Field with get_padding_token as an inherited method,"
" which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
return 0 # type: ignore
def get_padding_lengths(self, token: TokenType) -> Dict[str, int]:
"""
This method returns a padding dictionary for the given token that specifies lengths for
all arrays that need padding. For example, for single ID tokens the returned dictionary
will be empty, but for a token characters representation, this will return the number
of characters in the token.
"""
raise NotImplementedError
def get_token_min_padding_length(self) -> int:
"""
This method returns the minimum padding length required for this TokenIndexer.
For example, the minimum padding length of `SingleIdTokenIndexer` is the largest
size of filter when using `CnnEncoder`.
"""
return self._token_min_padding_length
def as_padded_tensor(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
"""
This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list
of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``
then it will be truncated.
``padding_lengths`` is used to provide supplemental padding parameters which are needed
in some cases. For example, it contains the widths to pad characters to when doing
character-level padding.
Note that this method should be abstract, but it is implemented to allow backward compatibility.
"""
if not self.has_warned_for_as_padded_tensor:
warnings.warn(
"Using a Field with pad_token_sequence, which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for key, array in padded.items()}
def pad_token_sequence(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, TokenType]:
"""
Deprecated. Please use `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release.
"""
raise NotImplementedError
def get_keys(self, index_name: str) -> List[str]:
"""
Return a list of the keys this indexer returns from ``tokens_to_indices``.
"""
return [index_name]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
| [
"torch.LongTensor"
] | 1.2.0 | loopylangur/allennlp | 0fc695b08a0376317e45ae0a45584aa9eb14beb6 |
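The ``as_padded_tensor`` contract above pads each key's token-id list to a desired length and truncates anything longer. A minimal sketch of that behaviour, assuming zero as the padding value and the hypothetical function name ``pad_token_ids``:
import torch
from typing import Dict, List
def pad_token_ids(tokens: Dict[str, List[int]], desired_num_tokens: Dict[str, int]) -> Dict[str, torch.Tensor]:
    padded = {}
    for key, ids in tokens.items():
        target = desired_num_tokens[key]
        # truncate if too long, pad with zeros if too short
        ids = ids[:target] + [0] * max(0, target - len(ids))
        padded[key] = torch.LongTensor(ids)
    return padded
# Example: pad_token_ids({"tokens": [4, 7, 9]}, {"tokens": 5}) -> {"tokens": tensor([4, 7, 9, 0, 0])}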
1.9 | from logger import coil_logger
import torch.nn as nn
import torch
import importlib
from configs import g_conf
from coilutils.general import command_number_to_index
from .building_blocks import Conv
from .building_blocks import Branching
from .building_blocks import FC
from .building_blocks import Join
from .building_blocks import ReverseLayerFunction
class CoILReverse(nn.Module):
def __init__(self, params, sensors):
super(CoILReverse, self).__init__()
self.params = params
number_first_layer_channels = 0
# If we fuse more than one frame, then the first layer will be a concatenation of
# the channels of this first layer [3, w, h] (e.g., 2 RGB images->3+3=6)
for _, sizes in g_conf.SENSORS.items():
number_first_layer_channels += sizes[0] * g_conf.NUMBER_FRAMES_FUSION
# Get one item from the dict
sensor_input_shape = next(iter(g_conf.SENSORS.values())) # [3, 300, 400]
sensor_input_shape = [number_first_layer_channels, sensor_input_shape[1],
sensor_input_shape[2]] # replace the above result on the channels here
# For this case we check if the perception layer is of the type "conv"
if 'conv' in params['perception']:
perception_convs = Conv(params={
'channels': [number_first_layer_channels] + params['perception']['conv']['channels'],
'kernels': params['perception']['conv']['kernels'],
'strides': params['perception']['conv']['strides'],
'dropouts': params['perception']['conv']['dropouts'],
'end_layer': True})
perception_fc = FC(params={
'neurons': [perception_convs.get_conv_output(sensor_input_shape)] + params['perception']['fc']['neurons'],
'dropouts': params['perception']['fc']['dropouts'],
'end_layer': False})
self.perception = nn.Sequential(*[perception_convs, perception_fc])
number_output_neurons = params['perception']['fc']['neurons'][-1]
elif 'res' in params['perception']: # predefined residual networks
resnet_module = importlib.import_module('network.models.building_blocks.resnet')
resnet_module = getattr(resnet_module, params['perception']['res']['name'])
self.perception = resnet_module(pretrained=g_conf.PRE_TRAINED,
num_classes=params['perception']['res']['num_classes'],
img_h=sensors['rgb'][1],
img_w=sensors['rgb'][2])
number_output_neurons = params['perception']['res']['num_classes']
else:
raise ValueError("Invalid perception layer type; only convolutional ('conv') or ResNet-based ('res') "
"are allowed)")
self.intermediate_layers = None
self.measurements = FC(params={'neurons': [len(g_conf.INPUTS)] + params['measurements']['fc']['neurons'],
'dropouts': params['measurements']['fc']['dropouts'],
'end_layer': False})
self.join = Join(
params={
'after_process': FC(params={
'neurons': [params['measurements']['fc']['neurons'][-1] + number_output_neurons] +
params['join']['fc']['neurons'],
'dropouts': params['join']['fc']['dropouts'],
'end_layer': False}),
'mode': 'cat'})
self.speed_branch = FC(params={
'neurons': [params['join']['fc']['neurons'][-1]] + params['speed_branch']['fc']['neurons'] + [1],
'dropouts': params['speed_branch']['fc']['dropouts'] + [0.0],
'end_layer': True})
# Create the fc vector separately
branch_fc_vector = []
for i in range(params['branches']['number_of_branches']):
branch_fc_vector.append(FC(params={'neurons': [params['join']['fc']['neurons'][-1]] +
params['branches']['fc']['neurons'] + [len(g_conf.TARGETS)],
'dropouts': params['branches']['fc']['dropouts'] + [0.0],
'end_layer': True}))
self.branches = Branching(branch_fc_vector) # Here we set branching automatically
# Weight initialization for the convolutional perception modules
if 'conv' in params['perception']:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.1)
# Init for the rest of the network
else:
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.1)
def forward(self, x, a):
""" ###### APPLY THE PERCEPTION MODULE """
x, self.intermediate_layers = self.perception(x)
# Not a variable, just to store intermediate layers for future visualization
# self.intermediate_layers = inter
""" APPLY THE MEASUREMENT MODULE """
m = self.measurements(a)
""" Join measurements and perception"""
j = self.join(x, m)
branch_outputs = self.branches(j)
speed_branch_output = self.speed_branch(x)
# We concatenate speed with the rest.
return branch_outputs + [speed_branch_output]
def forward_branch(self, x, a, branch_number):
"""
Do a forward operation and return a single branch.
Args:
x: the image input (torch.squeeze(data['rgb']))
a: speed measurement (dataset.extract_inputs(data))
branch_number: the branch number to be returned (data['directions'])
Returns:
the forward operation on the selected branch
"""
output_vec = torch.stack(self.forward(x, a)[0:self.params['branches']['number_of_branches']])
return self.extract_branch(output_vec, branch_number)
def get_perception_layers(self, x):
return self.perception.get_layers_features(x)
@staticmethod
def extract_branch(output_vec, branch_number):
# Extract
branch_number = command_number_to_index(branch_number)
if len(branch_number) > 1:
branch_number = torch.squeeze(branch_number.type(torch.cuda.LongTensor))
else:
branch_number = branch_number.type(torch.cuda.LongTensor)
branch_number = torch.stack([branch_number, torch.cuda.LongTensor(range(0, len(branch_number)))])
return output_vec[branch_number[0], branch_number[1], :]
| [
"torch.nn.Sequential",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.constant_"
] | 1.9.0 | PDillis/coiltraine | a682aa62af5f6ecb95a837d33b70d893d3d261f6 |
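``extract_branch`` above relies on advanced indexing to pick one branch output per sample from the stacked branch tensor. A minimal sketch with CPU tensors (the original uses ``torch.cuda.LongTensor``); the shapes and values below are illustrative only:
import torch
# output_vec has shape [num_branches, batch, num_targets]; pick branch b_i for sample i in one gather.
num_branches, batch_size, num_targets = 4, 3, 2
output_vec = torch.randn(num_branches, batch_size, num_targets)
branch_number = torch.LongTensor([2, 0, 3])            # one command/branch index per sample
sample_index = torch.arange(batch_size)
selected = output_vec[branch_number, sample_index, :]  # shape [batch_size, num_targets]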
1.0 | # coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LayoutLM model. """
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_layoutlm import LayoutLMConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LayoutLMConfig"
_TOKENIZER_FOR_DOC = "LayoutLMTokenizer"
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlm-base-uncased",
"layoutlm-large-uncased",
]
LayoutLMLayerNorm = torch.nn.LayerNorm
class LayoutLMEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(
self,
input_ids=None,
bbox=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM
class LayoutLMSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
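# Reshape [batch, seq_len, all_head_size] into [batch, num_heads, seq_len, head_size]
# so attention scores can be computed independently per head.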
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in LayoutLMModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM
class LayoutLMSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM
class LayoutLMAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMSelfAttention(config)
self.output = LayoutLMSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LayoutLMIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
class LayoutLMOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM
class LayoutLMLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = LayoutLMAttention(config)
self.intermediate = LayoutLMIntermediate(config)
self.output = LayoutLMOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM
class LayoutLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LayoutLMPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM
class LayoutLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
class LayoutLMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LayoutLMPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM
class LayoutLMOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = LayoutLMLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class LayoutLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMConfig
base_model_prefix = "layoutlm"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayoutLMLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
LAYOUTLM_START_DOCSTRING = r"""
The LayoutLM model was proposed in `LayoutLM: Pre-training of Text and Layout for Document Image Understanding
<https://arxiv.org/abs/1912.13318>`__ by....
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.LayoutLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LAYOUTLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.LayoutLMTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
bbox (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
        Bounding boxes of each input sequence token. Selected in the range ``[0, config.max_2d_position_embeddings
- 1]``.
`What are bboxes? <../glossary.html#position-ids>`_
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for
tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1`
indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned
tensors for more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.",
LAYOUTLM_START_DOCSTRING,
)
class LayoutLMModel(LayoutLMPreTrainedModel):
config_class = LayoutLMConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlm"
def __init__(self, config):
super(LayoutLMModel, self).__init__(config)
self.config = config
self.embeddings = LayoutLMEmbeddings(config)
self.encoder = LayoutLMEncoder(config)
self.pooler = LayoutLMPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="layoutlm-base-uncased",
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
input_ids (torch.LongTensor of shape (batch_size, sequence_length)):
Indices of input sequence tokens in the vocabulary.
attention_mask (torch.FloatTensor of shape (batch_size, sequence_length), optional):
Mask to avoid performing attention on padding token indices. Mask values selected in [0, 1]: 1 for tokens
that are NOT MASKED, 0 for MASKED tokens.
token_type_ids (torch.LongTensor of shape (batch_size, sequence_length), optional):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in [0, 1]:
0 corresponds to a sentence A token, 1 corresponds to a sentence B token
position_ids (torch.LongTensor of shape (batch_size, sequence_length), optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range [0,
config.max_position_embeddings - 1].
head_mask (torch.FloatTensor of shape (num_heads,) or (num_layers, num_heads), optional):
Mask to nullify selected heads of the self-attention modules. Mask values selected in [0, 1]: 1 indicates
the head is not masked, 0 indicates the head is masked.
inputs_embeds (torch.FloatTensor of shape (batch_size, sequence_length, hidden_size), optional):
Optionally, instead of passing input_ids you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert input_ids indices into associated vectors than the
model’s internal embedding lookup matrix.
output_attentions (bool, optional):
If set to True, the attentions tensors of all attention layers are returned.
output_hidden_states (bool, optional):
If set to True, the hidden states of all layers are returned.
return_dict (bool, optional):
If set to True, the model will return a ModelOutput instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids,
bbox=bbox,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
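def _layoutlm_forward_example():
    # Hedged usage sketch of LayoutLMModel above; tensor contents are random and
    # purely illustrative, and the small config exists only to keep the example cheap.
    import torch
    config = LayoutLMConfig(num_hidden_layers=2)
    model = LayoutLMModel(config).eval()
    batch_size, seq_len = 2, 16
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    # one (x0, y0, x1, y1) box per token, already on the 0-1000 grid
    bbox = torch.tensor([[[10, 20, 110, 40]] * seq_len] * batch_size)
    attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask)
    sequence_output, pooled_output = outputs[0], outputs[1]
    # (batch_size, seq_len, hidden_size) and (batch_size, hidden_size)
    return sequence_output.shape, pooled_output.shape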
@add_start_docstrings("""LayoutLM Model with a `language modeling` head on top. """, LAYOUTLM_START_DOCSTRING)
class LayoutLMForMaskedLM(LayoutLMPreTrainedModel):
config_class = LayoutLMConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlm"
def __init__(self, config):
super().__init__(config)
self.layoutlm = LayoutLMModel(config)
self.cls = LayoutLMOnlyMLMHead(config)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlm.embeddings.word_embeddings
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="layoutlm-base-uncased",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlm(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
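def _layoutlm_mlm_loss_example():
    # Hedged sketch of how `labels` drive the masked-LM head above: positions set to
    # -100 are ignored by the default CrossEntropyLoss, so only the "masked" position
    # contributes to the loss. Concrete values (e.g. mask token id 103 from the BERT
    # uncased vocabulary) are illustrative assumptions, not fixed by this file.
    import torch
    config = LayoutLMConfig(num_hidden_layers=2)
    model = LayoutLMForMaskedLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    bbox = torch.tensor([[[10, 20, 110, 40]] * 8])
    labels = torch.full((1, 8), -100, dtype=torch.long)
    labels[0, 3] = input_ids[0, 3]        # supervise a single position
    input_ids[0, 3] = 103                 # replace it with the [MASK] token id
    outputs = model(input_ids=input_ids, bbox=bbox, labels=labels, return_dict=True)
    return outputs.loss                   # scalar masked-LM loss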
@add_start_docstrings(
"""
LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
LAYOUTLM_START_DOCSTRING,
)
class LayoutLMForTokenClassification(LayoutLMPreTrainedModel):
config_class = LayoutLMConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlm"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlm = LayoutLMModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlm.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="layoutlm-base-uncased",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlm(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
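def _active_loss_example():
    # Hedged sketch of the attention-mask filtering used in
    # LayoutLMForTokenClassification.forward above: padded positions are dropped
    # before the token-classification loss is computed. Shapes and values are illustrative.
    import torch
    from torch.nn import CrossEntropyLoss
    num_labels = 3
    logits = torch.randn(2, 4, num_labels)                        # (batch, seq_len, num_labels)
    labels = torch.randint(0, num_labels, (2, 4))
    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])   # trailing zeros mark padding
    active = attention_mask.view(-1) == 1
    loss = CrossEntropyLoss()(logits.view(-1, num_labels)[active], labels.view(-1)[active])
    return loss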
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0 | timpal0l/transformers | d86d57faa3b6511c6e4d9139535d77b695b9af8a |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
import sys
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from unittest.mock import ANY, call, patch
import cloudpickle
import pytest
import torch
from omegaconf import OmegaConf
from torch.optim import SGD
from torch.utils.data import DataLoader
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.plugins import DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import DeviceType, DistributedType
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.seed import seed_everything
from tests.base import EvalModelTemplate
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# assert ckpt has hparams
ckpt = torch.load(new_weights_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints"
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_end_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# Extra layer
model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt else new_weights_path
)
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
)
# todo: specify the possible exception
except Exception:
failed = True
else:
failed = False
    assert failed, "Model should not have been loaded since an extra layer was added."
failed = False
try:
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path,
hparams_file=hparams_path,
strict=False,
)
# todo: specify the possible exception
except Exception:
failed = True
assert not failed, "Model should be loaded due to strict=False."
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
def test_trainer_accumulate_grad_batches_zero_grad(tmpdir, accumulate_grad_batches):
with patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=accumulate_grad_batches,
)
trainer.fit(model)
assert sgd_zero_grad.call_count == math.ceil(trainer.limit_train_batches / accumulate_grad_batches)
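# Worked numbers for the call-count assertion above (illustrative): with
# limit_train_batches=20, optimizer.zero_grad() fires once per optimizer step,
# i.e. ceil(20 / 1) = 20, ceil(20 / 2) = 10 and ceil(20 / 3) = 7 times for the
# three parametrized values of accumulate_grad_batches.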
@pytest.mark.parametrize(
["accumulate_grad_batches", "limit_train_batches"],
[
({
1: 2,
3: 4
}, 1.0),
({
1: 2,
3: 4
}, 0.5), # not to be divisible by accumulate_grad_batches on purpose
(3, 1.0),
(3, 0.8), # not to be divisible by accumulate_grad_batches on purpose
(4, 1.0),
(4, 0.7), # not to be divisible by accumulate_grad_batches on purpose
],
)
def test_gradient_accumulation_scheduling_last_batch(tmpdir, accumulate_grad_batches, limit_train_batches):
""" Verify optimizer.step() applied to last batch while grad accumulation """
class TestModel(BoringModel):
def state_dict(self, *args, **kwargs):
return deepcopy(super().state_dict(*args, **kwargs))
def check(self, d1, d2, equal=True):
keys = d1.keys() | d2.keys()
values = [torch.equal(d1[k], d2[k]) for k in keys]
return all(values) if equal else not any(values)
def backward(self, *args, **kwargs) -> None:
pre_bwd_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_bwd_state_dict)
out = super().backward(*args, **kwargs)
# state dict is equal, just the gradients changed
assert self.check(pre_bwd_state_dict, self.state_dict())
return out
def optimizer_step(self, *args, **kwargs):
pre_opt_step_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_opt_step_state_dict)
# this calls `backward` and `on_after_backward` inside the closure
out = super().optimizer_step(*args, **kwargs)
# the state dict changed
assert self.check(pre_opt_step_state_dict, self.state_dict(), equal=False)
self.opt_step_called = True
return out
def on_train_batch_start(self, *_):
self.start_state_dict = self.state_dict()
self.opt_step_called = False
def on_train_batch_end(self, outputs, batch, batch_idx, *_):
end_state_dict = self.state_dict()
is_last_batch = (batch_idx + 1) == self.trainer.num_training_batches
if is_last_batch or self.opt_step_called:
assert self.check(self.start_state_dict, end_state_dict, equal=False)
else:
assert self.check(self.start_state_dict, end_state_dict)
model = TestModel()
trainer = Trainer(
accumulate_grad_batches=accumulate_grad_batches,
max_epochs=2,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
def test_loading_meta_tags(tmpdir):
""" test for backward compatibility to meta_tags.csv """
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(hparams_path)
# save as legacy meta_tags.csv
tags_path = os.path.join(path_expt_dir, "meta_tags.csv")
save_hparams_to_tags_csv(tags_path, hparams)
tags = load_hparams_from_tags_csv(tags_path)
assert hparams == tags
def test_loading_yaml(tmpdir):
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, "hparams.yaml")
tags = load_hparams_from_yaml(hparams_path)
assert tags["batch_size"] == 32 and tags["hidden_dim"] == 1000
@pytest.mark.parametrize(
"save_top_k,save_last,expected_files",
[
pytest.param(-1, False, [f"epoch={i}.ckpt" for i in range(5)], id="CASE K=-1 (all)"),
pytest.param(1, False, {"epoch=4.ckpt"}, id="CASE K=1 (2.5, epoch 4)"),
pytest.param(2, False, [f"epoch={i}.ckpt" for i in (2, 4)], id="CASE K=2 (2.5 epoch 4, 2.8 epoch 2)"),
pytest.param(4, False, [f"epoch={i}.ckpt" for i in range(1, 5)], id="CASE K=4 (save all 4 base)"),
pytest.param(3, False, [f"epoch={i}.ckpt" for i in range(2, 5)], id="CASE K=3 (save the 2nd, 3rd, 4th model)"),
pytest.param(1, True, {"epoch=4.ckpt", "last.ckpt"}, id="CASE K=1 (save the 4th model and the last model)"),
],
)
def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):
"""Test ModelCheckpoint options."""
def mock_save_function(filepath, *args):
open(filepath, "a").close()
# simulated losses
losses = [10, 9, 2.8, 5, 2.5]
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir,
filename='{epoch}',
monitor='checkpoint_on',
save_top_k=save_top_k,
save_last=save_last,
verbose=True
)
trainer = Trainer()
trainer.state.fn = TrainerFn.FITTING
trainer.save_checkpoint = mock_save_function
# emulate callback's calls during the training
for i, loss in enumerate(losses):
trainer.fit_loop.current_epoch = i
trainer.fit_loop.global_step = i
trainer.logger_connector.callback_metrics.update({"checkpoint_on": loss})
checkpoint_callback.on_validation_end(trainer, trainer.lightning_module)
file_lists = set(os.listdir(tmpdir))
assert len(file_lists) == len(
expected_files
), f"Should save {len(expected_files)} models when save_top_k={save_top_k} but found={file_lists}"
# verify correct naming
for fname in expected_files:
assert fname in file_lists
def test_model_checkpoint_only_weights(tmpdir):
"""Tests use case where ModelCheckpoint is configured to save only model weights, and
user tries to load checkpoint to resume training.
"""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_weights_only=True)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
# assert saved checkpoint has no trainer data
checkpoint = torch.load(checkpoint_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert loading model works when checkpoint has only weights
assert EvalModelTemplate.load_from_checkpoint(checkpoint_path=checkpoint_path)
# directly save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path, weights_only=True)
# assert saved checkpoint has no trainer data
checkpoint = torch.load(new_weights_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert restoring train state fails
with pytest.raises(KeyError, match="checkpoint contains only the model"):
trainer.checkpoint_connector.restore(new_weights_path)
def test_model_freeze_unfreeze():
model = EvalModelTemplate()
model.freeze()
model.unfreeze()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Verify resuming from checkpoint runs the right number of epochs"""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
class TestModel(BoringModel):
# Model that tracks epochs and batches seen
num_epochs_end_seen = 0
num_batches_seen = 0
num_on_load_checkpoint_called = 0
def on_epoch_end(self):
self.num_epochs_end_seen += 1
def on_train_batch_start(self, *_):
self.num_batches_seen += 1
def on_load_checkpoint(self, _):
self.num_on_load_checkpoint_called += 1
model = TestModel()
trainer = Trainer(
max_epochs=2,
limit_train_batches=0.65,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],
default_root_dir=tmpdir,
val_check_interval=1.0,
progress_bar_refresh_rate=0,
logger=False,
weights_summary=None,
)
trainer.fit(model)
# `on_epoch_end` will be called once for val_sanity, twice for train, twice for val
assert model.num_epochs_end_seen == 1 + 2 + 2
assert model.num_batches_seen == trainer.num_training_batches * 2
assert model.num_on_load_checkpoint_called == 0
# Other checkpoints can be uncommented if/when resuming mid-epoch is supported
checkpoints = Path(trainer.checkpoint_callback.dirpath).glob("*.ckpt")
if url_ckpt:
# transform local paths into url checkpoints
ip, port = tmpdir_server
checkpoints = [f"http://{ip}:{port}/" + ckpt.name for ckpt in checkpoints]
for ckpt in checkpoints:
next_model = TestModel()
state = pl_load(ckpt)
# Resume training
new_trainer = Trainer(
default_root_dir=tmpdir,
resume_from_checkpoint=ckpt,
max_epochs=2,
)
new_trainer.fit(next_model)
assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
assert next_model.num_on_load_checkpoint_called == 1
def test_trainer_max_steps_and_epochs(tmpdir):
"""Verify model trains according to specified max steps"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
'max_epochs': 3,
'max_steps': num_train_samples + 10,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
# define less train epochs than steps
trainer_kwargs['max_epochs'] = 2
trainer_kwargs['max_steps'] = 3 * 2 * num_train_samples
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == num_train_samples * trainer.max_epochs
assert trainer.current_epoch == trainer.max_epochs - 1, "Model did not stop at max_epochs"
def test_trainer_min_steps_and_epochs(tmpdir):
"""Verify model trains according to specified min steps"""
model = EvalModelTemplate()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
trainer_kwargs = {
'limit_train_batches': 0.5,
'default_root_dir': tmpdir,
# define callback for stopping the model
'callbacks': [EarlyStopping(monitor="early_stop_on", min_delta=1.0)],
'val_check_interval': 2,
'min_epochs': 1,
'max_epochs': 7,
# define less min steps than 1 epoch
'min_steps': num_train_samples // 2,
'logger': False,
'weights_summary': None,
'progress_bar_refresh_rate': 0,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= num_train_samples, "Model did not train for at least min_epochs"
# define less epochs than min_steps
trainer_kwargs["min_steps"] = math.floor(num_train_samples * 1.5)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= math.floor(num_train_samples * 1.5), "Model did not train for at least min_steps"
def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
""" Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered. """
class TestModel(BoringModel):
training_step_invoked = 0
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * 0.0 # force minimal loss to trigger early stopping
self.log("loss", output["loss"])
self.training_step_invoked += 1
assert not self.trainer.should_stop
return output
model = TestModel()
early_stop = EarlyStopping(monitor="loss", patience=0, check_on_train_epoch_end=True)
min_epochs = 5
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
min_epochs=min_epochs,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[early_stop]
)
with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
trainer.fit(model)
message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
num_messages = len([record.message for record in caplog.records if message in record.message])
assert num_messages == min_epochs - 2
assert model.training_step_invoked == min_epochs * 2
def test_trainer_max_steps_accumulate_batches(tmpdir):
"""Verify model trains according to specified max steps with grad accumulated batches"""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer = Trainer(
limit_train_batches=0.5,
default_root_dir=tmpdir,
max_steps=num_train_samples + 10,
accumulate_grad_batches=10,
logger=False,
weights_summary=None,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
def test_benchmark_option(tmpdir):
"""Verify benchmark option."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
# verify torch.backends.cudnn.benchmark is not turned on
assert not torch.backends.cudnn.benchmark
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
benchmark=True,
)
trainer.fit(model)
# verify training completed
assert trainer.state.finished, f"Training failed with {trainer.state}"
# verify torch.backends.cudnn.benchmark is not turned off
assert torch.backends.cudnn.benchmark
@pytest.mark.parametrize("ckpt_path", (None, "best", "specific"))
@pytest.mark.parametrize("save_top_k", (-1, 0, 1, 2))
@pytest.mark.parametrize("fn", ("validate", "test", "predict"))
def test_tested_checkpoint_path(tmpdir, ckpt_path, save_top_k, fn):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
self.log("foo", -batch_idx)
return super().validation_step(batch, batch_idx)
def test_step(self, *args):
return self.validation_step(*args)
def predict_step(self, batch, *_):
return self(batch)
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
progress_bar_refresh_rate=0,
default_root_dir=tmpdir,
callbacks=[ModelCheckpoint(monitor="foo", save_top_k=save_top_k)],
)
trainer.fit(model)
trainer_fn = getattr(trainer, fn)
path_attr = f"{fn}{'d' if fn == 'validate' else 'ed'}_ckpt_path"
assert getattr(trainer, path_attr) is None
if ckpt_path == "best":
# ckpt_path is 'best', meaning we load the best weights
if save_top_k == 0:
with pytest.raises(MisconfigurationException, match=".*is not configured to save the best.*"):
trainer_fn(ckpt_path=ckpt_path)
else:
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
elif ckpt_path is None:
# ckpt_path is None, meaning we don't load any checkpoints and
# use the weights from the end of training
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) is None
else:
# specific checkpoint, pick one from saved ones
if save_top_k == 0:
with pytest.raises(FileNotFoundError):
trainer_fn(ckpt_path="random.ckpt")
else:
ckpt_path = str(
list((Path(tmpdir) / f"lightning_logs/version_{trainer.logger.version}/checkpoints").iterdir()
)[0].absolute()
)
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == ckpt_path
def test_disabled_training(tmpdir):
"""Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`."""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
model = CurrentModel()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.0,
limit_val_batches=0.2,
fast_dev_run=False,
)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
# check that limit_train_batches=0 turns off training
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert not model.training_step_invoked, "`training_step` should not run when `limit_train_batches=0`"
assert not model.training_epoch_end_invoked, "`training_epoch_end` should not run when `limit_train_batches=0`"
# check that limit_train_batches has no influence when fast_dev_run is turned on
model = CurrentModel()
trainer_options.update(fast_dev_run=True)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.training_step_invoked, "did not run `training_step` with `fast_dev_run=True`"
assert model.training_epoch_end_invoked, "did not run `training_epoch_end` with `fast_dev_run=True`"
def test_disabled_validation(tmpdir):
"""Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`."""
class CurrentModel(EvalModelTemplate):
validation_step_invoked = False
validation_epoch_end_invoked = False
def validation_step(self, *args, **kwargs):
self.validation_step_invoked = True
return super().validation_step(*args, **kwargs)
def validation_epoch_end(self, *args, **kwargs):
self.validation_epoch_end_invoked = True
return super().validation_epoch_end(*args, **kwargs)
hparams = EvalModelTemplate.get_default_hparams()
model = CurrentModel(**hparams)
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.0,
fast_dev_run=False,
)
trainer = Trainer(**trainer_options)
trainer.fit(model)
# check that limit_val_batches=0 turns off validation
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 1
assert not model.validation_step_invoked, "`validation_step` should not run when `limit_val_batches=0`"
assert not model.validation_epoch_end_invoked, "`validation_epoch_end` should not run when `limit_val_batches=0`"
# check that limit_val_batches has no influence when fast_dev_run is turned on
model = CurrentModel(**hparams)
trainer_options.update(fast_dev_run=True)
trainer = Trainer(**trainer_options)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.validation_step_invoked, "did not run `validation_step` with `fast_dev_run=True`"
assert model.validation_epoch_end_invoked, "did not run `validation_epoch_end` with `fast_dev_run=True`"
def test_nan_loss_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_inf = 3
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
if batch_idx == self.test_batch_inf:
if isinstance(output, dict):
output["loss"] *= torch.tensor(math.inf) # make loss infinite
else:
output /= 0
return output
model = CurrentModel()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_inf + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*The loss returned in `training_step` is.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_inf
for param in model.parameters():
assert torch.isfinite(param).all()
def test_nan_params_detection(tmpdir):
class CurrentModel(BoringModel):
test_batch_nan = 3
def on_after_backward(self):
if self.global_step == self.test_batch_nan:
# simulate parameter that became nan
torch.nn.init.constant_(self.layer.bias, math.nan)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=(model.test_batch_nan + 1),
terminate_on_nan=True,
)
with pytest.raises(ValueError, match=r".*Detected nan and/or inf values in `layer.bias`.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_nan
# after aborting the training loop, model still has nan-valued params
params = torch.cat([param.view(-1) for param in model.parameters()])
assert not torch.isfinite(params).all()
def test_trainer_interrupted_flag(tmpdir):
"""Test the flag denoting that a user interrupted training."""
model = EvalModelTemplate()
class InterruptCallback(Callback):
def __init__(self):
super().__init__()
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
raise KeyboardInterrupt
class HandleInterruptCallback(Callback):
def __init__(self):
super().__init__()
self.exc_info = None
def on_keyboard_interrupt(self, trainer, pl_module):
self.exc_info = sys.exc_info()
interrupt_callback = InterruptCallback()
handle_interrupt_callback = HandleInterruptCallback()
trainer = Trainer(
callbacks=[interrupt_callback, handle_interrupt_callback],
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
progress_bar_refresh_rate=0,
logger=False,
default_root_dir=tmpdir,
)
assert not trainer.interrupted
assert handle_interrupt_callback.exc_info is None
trainer.fit(model)
assert trainer.interrupted
assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)
def test_gradient_clipping(tmpdir):
"""
Test gradient clipping
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
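def _clip_grad_norm_sketch():
    # Illustrative sketch of what the assertion in the closure above checks: clipping
    # by norm rescales all gradients so their combined 2-norm is at most the configured
    # value (1.0 here). Not a Trainer test, just the underlying idea.
    import torch
    p = torch.nn.Parameter(torch.ones(10))
    p.grad = torch.full((10,), 2.0)                 # total 2-norm = sqrt(40) ~ 6.32
    torch.nn.utils.clip_grad_norm_([p], max_norm=1.0)
    return p.grad.norm(2)                           # ~ 1.0 after clipping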
def test_gradient_clipping_by_value(tmpdir):
"""
Test gradient clipping by value
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
# for the test
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_fp16(tmpdir):
"""
Test gradient clipping with fp16
"""
tutils.reset_seed()
model = EvalModelTemplate()
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=1.0,
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
@RunIf(min_gpus=1, amp_native=True)
def test_gradient_clipping_by_value_fp16(tmpdir):
"""
Test gradient clipping by value with fp16
"""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=16,
gpus=1,
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm='value',
default_root_dir=tmpdir,
)
old_training_step_and_backward = trainer.fit_loop.training_loop.batch_loop.training_step_and_backward
def training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# test that gradient is clipped correctly
ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert abs(grad_max.item() - grad_clip_val) < 1e-11, \
f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.training_loop.batch_loop.training_step_and_backward = training_step_and_backward
model.prev_called_batch_idx = 0
trainer.fit(model)
def test_gpu_choice(tmpdir):
trainer_options = dict(default_root_dir=tmpdir)
# Only run if CUDA is available
if not torch.cuda.is_available():
return
num_gpus = torch.cuda.device_count()
Trainer(**trainer_options, gpus=num_gpus, auto_select_gpus=True)
with pytest.raises(RuntimeError, match=r".*No GPUs available.*"):
Trainer(**trainer_options, gpus=num_gpus + 1, auto_select_gpus=True)
@pytest.mark.parametrize(
"limit_val_batches",
[0.0, 1, 1.0, 0.5, 5],
)
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
"""
Test that the number of sanity check batches is clipped to `limit_val_batches`.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
num_sanity_val_steps = 4
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=num_sanity_val_steps,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == num_sanity_val_steps
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple_mixed_length()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(
min(num_sanity_val_steps, num_batches) for num_batches in trainer.num_val_batches
)
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.3])
def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
"""
    Test that `num_sanity_val_steps=-1` runs through all validation data once, capped by the
    `limit_val_batches` Trainer argument.
"""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=-1,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == float("inf")
with patch.object(
trainer.fit_loop.validation_loop.epoch_loop,
"evaluation_step",
wraps=trainer.fit_loop.validation_loop.epoch_loop.evaluation_step
) as mocked:
val_dataloaders = model.val_dataloader__multiple()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(trainer.num_val_batches)
@pytest.mark.parametrize(
"trainer_kwargs,expected",
[
(
dict(accelerator=None, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="dp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp", num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator=None, gpus=1),
dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="dp", gpus=1),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp", gpus=1),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=1),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator=None, gpus=2),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="dp", gpus=2),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(accelerator="ddp", gpus=2),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=2),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
],
)
def test_trainer_config(trainer_kwargs, expected, monkeypatch):
if trainer_kwargs["gpus"] is not None:
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
trainer = Trainer(**trainer_kwargs)
assert len(expected) == 4
for k, v in expected.items():
assert getattr(trainer, k) == v, f"Failed {k}: {v}"
def test_trainer_subclassing():
model = EvalModelTemplate()
# First way of pulling out args from signature is to list them
class TrainerSubclass(Trainer):
def __init__(self, custom_arg, *args, custom_kwarg="test", **kwargs):
super().__init__(*args, **kwargs)
self.custom_arg = custom_arg
self.custom_kwarg = custom_kwarg
trainer = TrainerSubclass(123, custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_arg == 123
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# Second way is to pop from the dict
# It's a special case because Trainer does not have any positional args
class TrainerSubclass(Trainer):
def __init__(self, **kwargs):
self.custom_arg = kwargs.pop("custom_arg", 0)
self.custom_kwarg = kwargs.pop("custom_kwarg", "test")
super().__init__(**kwargs)
trainer = TrainerSubclass(custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# when we pass in an unknown arg, the base class should complain
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"):
TrainerSubclass(abcdefg="unknown_arg")
@pytest.mark.parametrize(
"trainer_params", [
OmegaConf.create(dict(max_epochs=1, gpus=1)),
OmegaConf.create(dict(max_epochs=1, gpus=[0])),
]
)
@RunIf(min_gpus=1)
def test_trainer_omegaconf(trainer_params):
Trainer(**trainer_params)
def test_trainer_pickle(tmpdir):
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
pickle.dumps(trainer)
cloudpickle.dumps(trainer)
@pytest.mark.parametrize("stage", ("fit", "validate", "test"))
def test_trainer_setup_call(tmpdir, stage):
"""Test setup call gets the correct stage"""
class CurrentModel(BoringModel):
def setup(self, stage):
self.stage = stage
class TrainerSubclass(Trainer):
def setup(self, model, stage):
assert model is not None
self.stage = stage
model = CurrentModel()
# fit model
trainer = TrainerSubclass(default_root_dir=tmpdir, max_epochs=1, checkpoint_callback=False)
if stage == "fit":
trainer.fit(model)
elif stage == "validate":
trainer.validate(model, ckpt_path=None)
else:
trainer.test(model, ckpt_path=None)
assert trainer.stage == stage
assert trainer.lightning_module.stage == stage
@pytest.mark.parametrize(
"train_batches, max_steps, log_interval",
[
(10, 10, 1),
(3, 10, 1),
(3, 10, 5),
],
)
@patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics")
def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval):
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
self.log("foo", -1)
return super().training_step(*args, **kwargs)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
log_every_n_steps=log_interval,
flush_logs_every_n_steps=log_interval,
limit_train_batches=train_batches,
limit_val_batches=0,
max_steps=max_steps,
)
trainer.fit(model)
expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)]
log_metrics_mock.assert_has_calls(expected_calls)
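# Worked numbers for expected_calls above (illustrative): with max_steps=10 and
# log_interval=5, logging is expected at global steps 4 and 9 (range(5 - 1, 10, 5));
# with log_interval=1 it is expected at every step 0..9.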
class TestLightningDataModule(LightningDataModule):
def __init__(self, dataloaders):
super().__init__()
self._dataloaders = dataloaders
def test_dataloader(self):
return self._dataloaders
def predict_dataloader(self):
return self._dataloaders
class CustomPredictionWriter(BasePredictionWriter):
write_on_batch_end_called = False
write_on_epoch_end_called = False
def __init__(self, output_dir: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_dir = output_dir
def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):
assert prediction.shape == torch.Size([1, 2])
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 1
else:
assert batch_indices is None
self.write_on_batch_end_called = True
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
expected = 1 if trainer.accelerator_connector.is_distributed else 2
assert len(predictions) == 2
assert len(predictions[0]) == expected
if trainer.accelerator_connector.is_distributed:
assert len(batch_indices) == 2
assert len(batch_indices[0]) == expected
else:
assert batch_indices is None
self.write_on_epoch_end_called = True
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if trainer.accelerator_connector.is_distributed:
for idx in range(2):
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)
super().on_predict_epoch_end(trainer, pl_module, outputs)
def predict(
tmpdir, accelerator, gpus, num_processes, model=None, plugins=None, datamodule=True, pbrr=None, use_callbacks=True
):
dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
model = model or BoringModel()
dm = TestLightningDataModule(dataloaders)
cb = CustomPredictionWriter(tmpdir, write_interval="batch")
cb_1 = CustomPredictionWriter(tmpdir, write_interval="epoch")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
plugins=plugins,
progress_bar_refresh_rate=pbrr,
callbacks=[cb, cb_1] if use_callbacks else []
)
if accelerator == "ddp_spawn":
with pytest.raises(MisconfigurationException):
trainer.predict(model, datamodule=dm, return_predictions=True)
if datamodule:
results = trainer.predict(model, datamodule=dm)
else:
results = trainer.predict(model, dataloaders=dataloaders)
if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):
if use_callbacks:
assert cb.write_on_batch_end_called
assert not cb.write_on_epoch_end_called
assert not cb_1.write_on_batch_end_called
assert cb_1.write_on_epoch_end_called
num_samples = 1 if accelerator == "ddp" else 2
assert len(results) == 2
assert len(results[0]) == num_samples
assert results[0][0].shape == torch.Size([1, 2])
def test_trainer_predict_no_return(tmpdir):
"""
    Test that trainer.predict warns when nothing is returned
"""
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
if (batch_idx + 1) % 2 == 0:
return
return super().predict_step(batch, batch_idx, dataloader_idx)
with pytest.warns(UserWarning, match='predict returned None'):
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
def test_trainer_predict_grad(tmpdir):
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
assert batch.expand_as(batch).grad_fn is None
return super().predict_step(batch, batch_idx, dataloader_idx)
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
x = torch.zeros(1, requires_grad=True)
assert x.expand_as(x).grad_fn is not None
@pytest.mark.parametrize('progress_bar_refresh_rate', [0, 5, None])
@pytest.mark.parametrize('datamodule', [False, True])
def test_trainer_predict_cpu(tmpdir, datamodule, progress_bar_refresh_rate):
predict(tmpdir, None, None, 1, datamodule=datamodule, pbrr=progress_bar_refresh_rate)
@RunIf(min_gpus=2, special=True)
@pytest.mark.parametrize('num_gpus', [1, 2])
def test_trainer_predict_dp(tmpdir, num_gpus):
predict(tmpdir, "dp", num_gpus, None)
@RunIf(min_gpus=2, special=True, fairscale=True)
def test_trainer_predict_ddp(tmpdir):
predict(tmpdir, "ddp", 2, None)
@RunIf(min_gpus=2, skip_windows=True, special=True)
def test_trainer_predict_ddp_spawn(tmpdir):
predict(tmpdir, "ddp_spawn", 2, None)
@RunIf(min_gpus=2, special=True)
def test_trainer_predict_1_gpu(tmpdir):
predict(tmpdir, None, 1, None)
@RunIf(skip_windows=True)
def test_trainer_predict_ddp_cpu(tmpdir):
predict(tmpdir, "ddp_cpu", 0, 2)
@patch('torch.cuda.device_count', return_value=2)
@patch('torch.cuda.is_available', return_value=True)
def test_spawn_predict_return_predictions(*_):
"""
    Test that `return_predictions=True` raises a MisconfigurationException with spawn training type plugins.
"""
model = BoringModel()
def run(expected_plugin, **trainer_kwargs):
trainer = Trainer(**trainer_kwargs, fast_dev_run=True)
assert isinstance(trainer.training_type_plugin, expected_plugin)
with pytest.raises(MisconfigurationException, match="`return_predictions` should be set to `False`"):
trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=True)
run(DDPSpawnPlugin, accelerator="ddp_spawn", gpus=2)
run(DDPSpawnPlugin, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("return_predictions", [None, False, True])
@pytest.mark.parametrize("precision", [32, 64])
def test_predict_return_predictions_cpu(return_predictions, precision, tmpdir):
"""
    Test the `return_predictions` flag on CPU for 32- and 64-bit precision.
"""
seed_everything(42)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, precision=precision)
preds = trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=return_predictions)
if return_predictions or return_predictions is None:
assert len(preds) == 1
assert preds[0].shape == torch.Size([1, 2])
assert preds[0].dtype == (torch.float64 if precision == 64 else torch.float32)
@pytest.mark.parametrize(
["limit_train_batches", "global_step", "num_training_batches", "current_epoch", "should_train"],
[(0.2, 0, 0, 0, False), (0.5, 10, 2, 4, True)],
)
def test_disabled_training_for_insufficient_limit_train_batches(
tmpdir, limit_train_batches, global_step, num_training_batches, current_epoch, should_train
):
"""
    Verify that when `limit_train_batches` is a float in [0.0, 1.0] and
    `int(self.num_training_batches * self.limit_train_batches) == 0`, the training loop is disabled.
"""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
dataset_len = 100
batch_size = 25
train = RandomDataset(32, length=dataset_len)
train_loader = DataLoader(train, batch_size=batch_size)
model = CurrentModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=5,
limit_train_batches=limit_train_batches,
)
trainer.fit(model, train_loader)
params_string = f"""`limit_train_batches={limit_train_batches}`, `dataset_len={dataset_len}`
& `batch_size={batch_size}` as
`num_training_batches={num_training_batches}`"""
if should_train:
error_string = f"should run with {params_string}"
else:
error_string = f"should not run with {params_string}"
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == global_step
assert trainer.num_training_batches == num_training_batches
assert trainer.current_epoch == current_epoch
assert model.training_step_invoked == should_train, f"`training_step` {error_string}"
assert model.training_epoch_end_invoked == should_train, f"`training_epoch_end` {error_string}"
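# Hedged illustration (not part of the original test suite): the arithmetic behind the
# parametrization above. With dataset_len=100 and batch_size=25 there are 4 training
# batches, and a float `limit_train_batches` keeps int(4 * fraction) of them per epoch,
# so 0.2 disables the loop (int(0.8) == 0) while 0.5 keeps 2 batches.
def _resolved_num_training_batches(num_batches: int, limit_train_batches: float) -> int:
    # e.g. _resolved_num_training_batches(4, 0.2) == 0, _resolved_num_training_batches(4, 0.5) == 2
    return int(num_batches * limit_train_batches)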
@pytest.mark.parametrize(["max_steps", "max_epochs", "global_step"], [(10, 5, 10), (20, None, 20)])
def test_repeated_fit_calls_with_max_epochs_and_steps(tmpdir, max_steps, max_epochs, global_step):
"""
    Ensure that the training loop is bound by `max_steps` and
    `max_epochs` for repeated calls of `trainer.fit`, and
    disabled once the limit is reached.
"""
dataset_len = 200
batch_size = 10
train_data = DataLoader(RandomDataset(32, dataset_len), batch_size=batch_size)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=max_steps,
max_epochs=max_epochs,
)
trainer.fit(model, train_data)
assert trainer.global_step == global_step
trainer.fit(model, train_data)
assert trainer.global_step == global_step
def test_trainer_access_in_configure_optimizers(tmpdir):
"""
Verify that the configure optimizer function can reference the trainer.
"""
class TestModel(BoringModel):
def configure_optimizers(self):
assert self.trainer is not None, "Expect to have access to the trainer within `configure_optimizers`"
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model, train_data)
@RunIf(min_gpus=1)
def test_setup_hook_move_to_device_correctly(tmpdir):
"""
Verify that if a user defines a layer in the setup hook function, this is moved to the correct device.
"""
class TestModel(BoringModel):
def setup(self, stage: str) -> None:
self.new_layer = torch.nn.Linear(2, 2)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
# will crash if not moved to correct device
output = self.new_layer(output)
loss = self.loss(batch, output)
return {"loss": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, gpus=1)
trainer.fit(model, train_data)
def test_train_loop_system(tmpdir):
"""
    Test that the following methods are called in the given order in automatic optimization.
1. optimizer.step (skip when gradient accumulation)
2. model.training_step
3. optimizer.zero_grad (run when the first batch of gradient accumulation)
4. model.backward
Note that the order is NOT `training_step`->`zero_grad`->`backward`->`step`.
This is because `optimizer.step(closure)` calls `closure()` which then calls
the three remaining methods `training_step`, `zero_grad` and `backward` inside.
"""
called_methods = []
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=5,
limit_val_batches=1,
limit_test_batches=1,
progress_bar_refresh_rate=0,
)
class TestOptimizer(SGD):
def step(self, *args, **kwargs):
called_methods.append("step")
return super().step(*args, **kwargs)
def zero_grad(self, *args, **kwargs):
called_methods.append("zero_grad")
return super().zero_grad(*args, **kwargs)
class TestModel(BoringModel):
def configure_optimizers(self):
return TestOptimizer(self.parameters(), lr=0.1)
def training_step(self, *args, **kwargs):
called_methods.append("training_step")
return super().training_step(*args, **kwargs)
def backward(self, *args, **kwargs):
called_methods.append("backward")
return super().backward(*args, **kwargs)
model = TestModel()
trainer = Trainer(**trainer_options)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
"step",
"training_step",
"zero_grad",
"backward",
] * trainer.limit_train_batches
called_methods.clear()
trainer = Trainer(**trainer_options, accumulate_grad_batches=3)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
# 0
"training_step",
"zero_grad",
"backward",
# 1
"training_step",
"backward",
# 2
"step",
"training_step",
"backward",
# 3
"training_step",
"zero_grad",
"backward",
# 4
"step",
"training_step",
"backward",
]
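# Hedged illustration (not part of the original test suite) of why "step" precedes
# "training_step" above: Lightning hands a closure to `optimizer.step`, and that closure
# runs the forward/`training_step`, `zero_grad` and `backward` from inside the step call.
# `_closure_order_demo` is a hypothetical helper for illustration only.
def _closure_order_demo():
    calls = []
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = SGD(params, lr=0.1)
    def closure():
        calls.append("training_step")            # forward pass / loss computation
        loss = params[0].sum()
        calls.append("zero_grad")
        optimizer.zero_grad()
        calls.append("backward")
        loss.backward()
        return loss
    calls.append("step")                         # step() is entered first...
    optimizer.step(closure)                      # ...and invokes closure() internally
    return calls  # ["step", "training_step", "zero_grad", "backward"]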
def test_init_optimizers_resets_lightning_optimizers(tmpdir):
""" Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized. """
def compare_optimizers():
assert trainer.lightning_optimizers[0].optimizer is trainer.optimizers[0]
model = BoringModel()
model.lr = 0.2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
auto_lr_find=True,
)
trainer.tune(model)
compare_optimizers()
trainer.fit(model)
compare_optimizers()
trainer.fit_loop.max_epochs = 2 # simulate multiple fit calls
trainer.fit(model)
compare_optimizers()
def test_check_val_every_n_epoch_exception(tmpdir):
with pytest.raises(MisconfigurationException, match="should be an integer."):
Trainer(
default_root_dir=tmpdir,
max_epochs=1,
check_val_every_n_epoch=1.2,
)
def test_trainer_attach_data_pipeline_to_model(tmpdir):
class DataPipeline:
pass
class TestDataModule(LightningDataModule):
data_pipeline = DataPipeline()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class TestCallback(Callback):
def on_fit_start(self, trainer, pl_module: LightningModule) -> None:
"""Called when fit begins"""
assert isinstance(pl_module.data_pipeline, DataPipeline)
model = BoringModel()
dm = TestDataModule()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[TestCallback()])
trainer.fit(model, datamodule=dm)
def test_exception_when_testing_or_validating_with_fast_dev_run(tmpdir):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = BoringModel()
trainer.fit(model)
with pytest.raises(MisconfigurationException, match=r"\.validate\(\)` with `fast_dev_run=True"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"\.test\(\)` with `fast_dev_run=True"):
trainer.test()
class TrainerStagesModel(BoringModel):
def on_train_start(self) -> None:
assert self.trainer.model.training
assert self.training
def on_validation_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_test_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_predict_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
@pytest.mark.parametrize(
'accelerator,num_processes', [(None, 1), pytest.param('ddp', 2, marks=RunIf(skip_windows=True))]
)
def test_model_in_correct_mode_during_stages(tmpdir, accelerator, num_processes):
model = TrainerStagesModel()
trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, model.val_dataloader())
class TestDummyModelForCheckpoint(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('x', loss)
def validation_epoch_end(self, outputs) -> None:
pass
@RunIf(skip_windows=True)
def test_fit_test_synchronization(tmpdir):
"""Test that the trainer synchronizes processes before returning control back to the caller. """
tutils.set_random_master_port()
model = TestDummyModelForCheckpoint()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor='x', mode='min', save_top_k=1)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
accelerator='ddp_cpu',
num_processes=2,
callbacks=[checkpoint],
)
trainer.fit(model)
assert os.path.exists(checkpoint.best_model_path), f'Could not find checkpoint at rank {trainer.global_rank}'
trainer.test()
class CustomCallbackOnLoadCheckpoint(Callback):
def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> dict:
return {"a": None}
def test_on_load_checkpoint_missing_callbacks(tmpdir):
""" Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming. """
model = BoringModel()
chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])
trainer.fit(model)
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path, progress_bar_refresh_rate=1
)
with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"):
trainer.fit(model)
def test_module_current_fx_attributes_reset(tmpdir):
""" Ensure that lightning module's attributes related to current fx are reset at the end of execution. """
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=1,
checkpoint_callback=False,
logger=False,
)
trainer.fit(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
trainer.test(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
def test_exception_when_lightning_module_is_not_set_on_trainer():
trainer = Trainer()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*validate"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*test"):
trainer.test()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*predict"):
trainer.predict()
| [
"torch.nn.Linear",
"torch.zeros",
"torch.Size",
"torch.stack",
"torch.eq",
"torch.nn.init.constant_",
"torch.isfinite",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.equal"
] | 1.4 | pollenjp/pytorch-lightning | 06f83492919c4c72a989f9bb8f271b92b479648b |
1.8 | from torchflare.batch_mixers.mixers import cutmix, mixup, get_collate_fn
import torch
x = torch.randn(4, 3, 256, 256)
targets = torch.tensor([0, 1, 0, 1])
ds = torch.utils.data.TensorDataset(x, targets)
def test_mixup():
dl = torch.utils.data.DataLoader(ds, batch_size=2)
batch = next(iter(dl))
op, y = mixup(batch=batch, alpha=0.35)
assert torch.is_tensor(op) is True
assert isinstance(y, (tuple, list)) is True
targets_a, targets_b, lam = y
assert torch.is_tensor(targets_a) is True
assert torch.is_tensor(targets_b) is True
assert isinstance(lam, (int, float)) is True
def test_cutmix():
dl = torch.utils.data.DataLoader(ds, batch_size=2)
batch = next(iter(dl))
op, y = cutmix(batch=batch, alpha=0.35)
assert torch.is_tensor(op) is True
assert isinstance(y, (tuple, list)) is True
targets_a, targets_b, lam = y
assert torch.is_tensor(targets_a) is True
assert torch.is_tensor(targets_b) is True
assert isinstance(lam, (int, float)) is True
def test_collate_fn_mixup():
mixup_collate_fn = get_collate_fn(mixer_name="mixup", alpha=0.35)
dl = torch.utils.data.DataLoader(ds, batch_size=2, collate_fn=mixup_collate_fn)
op, y = next(iter(dl))
assert torch.is_tensor(op) is True
assert isinstance(y, (tuple, list)) is True
targets_a, targets_b, lam = y
assert torch.is_tensor(targets_a) is True
assert torch.is_tensor(targets_b) is True
assert isinstance(lam, (int, float)) is True
def test_collate_fn_cutmix():
mixup_collate_fn = get_collate_fn(mixer_name="cutmix", alpha=0.35)
dl = torch.utils.data.DataLoader(ds, batch_size=2, collate_fn=mixup_collate_fn)
op, y = next(iter(dl))
assert torch.is_tensor(op) is True
assert isinstance(y, (tuple, list)) is True
targets_a, targets_b, lam = y
assert torch.is_tensor(targets_a) is True
assert torch.is_tensor(targets_b) is True
assert isinstance(lam, (int, float)) is True
| [
"torch.is_tensor",
"torch.randn",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.utils.data.TensorDataset"
] | 1.8.0 | earlbabson/torchflare | 15db06d313a53a3ec4640869335ba87730562b28 |
1.4 | import torch, numpy as np, scipy.sparse as sp
from torch.nn import functional as F
from tqdm import tqdm
def adjacency(H):
"""
construct adjacency for recursive hypergraph
arguments:
H: recursive hypergraph
"""
A = np.eye(H['n'])
E = H['D0']
for k in tqdm(E):
e = list(E[k])
for u in e:
A[k][u], A[u][k] = 1, 1
for v in e:
if u != v: A[u][v], A[v][u] = 1, 1
E = H['D1']
for k in tqdm(E):
e = list(E[k])
for u in e:
for v in e:
if u != v: A[u][v], A[v][u] = 1, 1
return ssm2tst(symnormalise(sp.csr_matrix(A)))
def symnormalise(M):
"""
symmetrically normalise sparse matrix
arguments:
M: scipy sparse matrix
returns:
D^{-1/2} M D^{-1/2}
where D is the diagonal node-degree matrix
"""
d = np.array(M.sum(1))
dhi = np.power(d, -1/2).flatten()
dhi[np.isinf(dhi)] = 0.
DHI = sp.diags(dhi) # D half inverse i.e. D^{-1/2}
return (DHI.dot(M)).dot(DHI)
def ssm2tst(M):
"""
converts a scipy sparse matrix (ssm) to a torch sparse tensor (tst)
arguments:
M: scipy sparse matrix
returns:
a torch sparse tensor of M
"""
M = M.tocoo().astype(np.float32)
indices = torch.from_numpy(np.vstack((M.row, M.col))).long()
values = torch.from_numpy(M.data)
shape = torch.Size(M.shape)
return torch.sparse.FloatTensor(indices, values, shape)
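# Hedged usage sketch (not part of the original module): a toy recursive hypergraph with
# n=3 vertices pushed through adjacency(), which internally applies symnormalise()
# (D^{-1/2} A D^{-1/2}) and ssm2tst() to return a torch sparse adjacency. The dict layout
# follows the argument description above; the concrete values are illustrative only.
def _adjacency_demo():
    H = {'n': 3, 'D0': {0: {1, 2}}, 'D1': {2: {0, 1}}}
    A = adjacency(H)        # torch sparse tensor of shape (3, 3)
    return A.to_dense()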
def normalise(M):
"""
row-normalise sparse matrix
arguments:
M: scipy sparse matrix
returns:
D^{-1} M
where D is the diagonal node-degree matrix
"""
d = np.array(M.sum(1))
di = np.power(d, -1).flatten()
di[np.isinf(di)] = 0.
DI = sp.diags(di) # D inverse i.e. D^{-1}
return DI.dot(M) | [
"torch.Size",
"torch.sparse.FloatTensor",
"torch.from_numpy"
] | 1.4.0 | naganandy/G-MPNN-R | 04564c059e6e8cfc08edb27403dfe6bb89ba8bab |
0.3 | import math
from typing import Tuple, List, Dict, Any
import bnn
import numpy as np
import torch
from bnn import BDropout, CDropout
from torch import Tensor
from torch.nn import Module, functional as F
from .ssm_cem import CemSSM
from ..ssm_pytorch import utilities
from ..utils import get_device, assert_shape
class McDropoutSSM(CemSSM):
"""A BNN state space model, approximated using concrete mc dropout.
Uses the "bnn" package from https://github.com/anassinator/bnn
"""
def __init__(self, conf, state_dimen: int, action_dimen: int):
super().__init__(state_dimen, action_dimen)
self._training_iterations = conf.mc_dropout_training_iterations
self._num_mc_samples = conf.mc_dropout_num_samples
self._predict_std = conf.mc_dropout_predict_std
self._reinitialize_on_train = conf.mc_dropout_reinitialize
self._state_dimen = state_dimen
self._model_constructor = self._get_model_constructor(conf, state_dimen, action_dimen)
self._model = self._model_constructor()
self._model.eval()
self._loss_function = self._get_loss_function(conf)
def _get_loss_function(self, conf):
if conf.mc_dropout_type == 'fixed':
return self._fixed_dropout_loss
elif conf.mc_dropout_type == 'concrete':
return self._concrete_dropout_loss
else:
raise ValueError(f'Unknown dropout type {conf.mc_dropout_type}')
def _get_model_constructor(self, conf, state_dimen: int, action_dimen: int):
in_features = state_dimen + action_dimen
# Double the regression outputs. We need one for the mean and one for the predicted std (if enabled)
out_features = state_dimen * 2 if self._predict_std else state_dimen
def constructor() -> Module:
input_dropout, dropout_layers = self._get_dropout_layers(conf)
model = bnn.bayesian_model(in_features, out_features, hidden_features=conf.mc_dropout_hidden_features,
dropout_layers=dropout_layers, input_dropout=input_dropout)
model = model.to(get_device(conf))
return model
return constructor
@staticmethod
def _get_dropout_layers(conf) -> Tuple[Module, List[Module]]:
hidden_features = conf.mc_dropout_hidden_features
if conf.mc_dropout_type == 'fixed':
p = conf.mc_dropout_fixed_probability
input_dropout = BDropout(rate=p) if conf.mc_dropout_on_input else None
dropout_layers = [BDropout(rate=p) for _ in hidden_features]
elif conf.mc_dropout_type == 'concrete':
p = conf.mc_dropout_concrete_initial_probability
input_dropout = CDropout(rate=p) if conf.mc_dropout_on_input else None
dropout_layers = [CDropout(rate=p) for _ in hidden_features]
else:
raise ValueError(f'Unknown dropout type {conf.mc_dropout_type}')
return input_dropout, dropout_layers
def predict_with_jacobians(self, states: Tensor, actions: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
z = self._join_states_actions(states, actions)
pred_mean, pred_var = self.predict_raw(z)
def mean_func(x: Tensor):
return self.predict_raw(x)[0]
pred_mean_jac = utilities.compute_jacobian_fast(mean_func, z, num_outputs=self.num_states)
return pred_mean, pred_var, pred_mean_jac
def predict_without_jacobians(self, states: Tensor, actions: Tensor) -> Tuple[Tensor, Tensor]:
z = self._join_states_actions(states, actions)
return self.predict_raw(z)
def predict_raw(self, z: Tensor):
N = z.size(0)
assert_shape(z, (N, self.num_states + self.num_actions))
        # To get the predictive mean and variance, we sample the network _num_mc_samples times.
z_particles = z.repeat((self._num_mc_samples, 1, 1))
output = self._model(z_particles)
preds = output[:, :, :self._state_dimen]
if self._predict_std:
pred_log_stds = output[:, :, self._state_dimen:]
preds_with_noise = preds + pred_log_stds.exp() * torch.randn_like(preds)
return preds_with_noise.mean(dim=0), preds_with_noise.var(dim=0)
else:
return preds.mean(dim=0), preds.var(dim=0)
def _update_model(self, x_train: Tensor, y_train: Tensor) -> None:
# Nothing to do. We do not store the training data, just incorporate it in the model in _train_model().
pass
def _train_model(self, x_train: Tensor, y_train: Tensor) -> None:
if self._reinitialize_on_train:
# Construct an entirely new model to ensure all parameters are reinitialized correctly.
self._model = self._model_constructor()
self._model.train()
optimizer = torch.optim.Adam(p for p in self._model.parameters() if p.requires_grad)
# The size of y_train may be [N], but we require [N x n].
if y_train.dim() == 1:
y_train = y_train.unsqueeze(1)
# TODO: should we reset the weights at the start of each training?
print(f'Training BNN on {x_train.size(0)} data points for {self._training_iterations} iterations...')
losses = []
for i in range(self._training_iterations):
optimizer.zero_grad()
output = self._model(x_train, resample=True)
pred_means = output[:, :self._state_dimen]
pred_log_stds = output[:, self._state_dimen:] if self._predict_std else None
loss = self._loss_function(y_train, pred_means, pred_log_stds)
loss.backward()
optimizer.step()
losses.append(loss.item())
print(f'Training complete. Final losses: {losses[-4:]}')
self._model.eval()
def _fixed_dropout_loss(self, targets, pred_means, pred_log_stds):
if pred_log_stds is not None:
raise ValueError('Predicting aleatoric uncertainty is not supported for fixed dropout.')
return F.mse_loss(pred_means, targets) + 1e-2 * self._model.regularization()
def _concrete_dropout_loss(self, targets, pred_means, pred_log_stds):
return (-self._gaussian_log_likelihood(targets, pred_means,
pred_log_stds) + 1e-2 * self._model.regularization()).mean()
@staticmethod
def _gaussian_log_likelihood(targets, pred_means, pred_log_stds):
"""Taken from https://github.com/anassinator/bnn/blob/master/examples/sin_x.ipynb"""
deltas = pred_means - targets
if pred_log_stds is not None:
pred_stds = pred_log_stds.exp()
# TODO: does the np.log below cause a speed problem?
return - ((deltas / pred_stds) ** 2).sum(-1) * 0.5 - pred_stds.log().sum(-1) - np.log(2 * math.pi) * 0.5
else:
return - (deltas ** 2).sum(-1) * 0.5
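    # Note on the helper above: when stds are predicted it returns, per example,
    #   -0.5 * sum_d ((mu_d - t_d) / sigma_d)**2 - sum_d log(sigma_d) - 0.5 * log(2*pi)
    # (the 2*pi constant is added once rather than per dimension, which only shifts the
    # loss by a constant); without predicted stds it reduces to -0.5 * ||mu - t||**2.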
def collect_metrics(self) -> Dict[str, Any]:
dropout_ps = self._get_dropout_probabilities()
return dropout_ps
def _get_dropout_probabilities(self) -> Dict[str, float]:
ps = dict()
for i, layer in enumerate(self._model.children()):
if isinstance(layer, (CDropout, BDropout)):
                # layer.p is the keep probability, so the dropout rate is its complement.
ps[f'dropout_p_layer_{i}'] = 1 - layer.p.item()
return ps
@property
def parametric(self) -> bool:
return True
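# Hedged, generic sketch (not from the original repo) of the MC-dropout trick used by
# predict_raw() above: keep dropout stochastic at inference time, replicate the input
# along a sample axis, and read the predictive mean/variance off that axis. Plain
# torch.nn layers stand in for the `bnn` model purely for illustration.
def _mc_dropout_sketch(num_samples: int = 50):
    net = torch.nn.Sequential(
        torch.nn.Linear(3, 16), torch.nn.Dropout(p=0.1), torch.nn.ReLU(), torch.nn.Linear(16, 2))
    net.train()                                   # dropout stays active at "inference"
    z = torch.randn(5, 3)                         # a batch of 5 state-action inputs
    z_particles = z.repeat((num_samples, 1, 1))   # (S, 5, 3), mirroring predict_raw()
    preds = net(z_particles)                      # (S, 5, 2)
    return preds.mean(dim=0), preds.var(dim=0)    # predictive mean and (epistemic) variance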
| [
"torch.nn.functional.mse_loss",
"torch.randn_like"
] | 0.3.2 | oscarkey/safe-exploration | 32f0582a7b54ab7d4c1d415afbcf5e9554e8bcec |
0.4 | """Trainining script for WaveNet vocoder
usage: train.py [options]
options:
    --dump-root=<dir>            Directory that contains preprocessed features.
--checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].
    --hparams=<params>           Hyper parameters [default: ].
--preset=<json> Path of preset parameters (json).
--checkpoint=<path> Restore model from checkpoint path if given.
--restore-parts=<path> Restore part of the model.
--log-event-path=<name> Log event path.
--reset-optimizer Reset optimizer.
    --speaker-id=<N>             Use a specific speaker of the data for multi-speaker datasets.
-h, --help Show this help message and exit
"""
from docopt import docopt
import sys
import os
from os.path import dirname, join, expanduser, exists
from tqdm import tqdm
from datetime import datetime
import random
import json
from glob import glob
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lrschedule
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim
import torch.backends.cudnn as cudnn
from torch.utils import data as data_utils
from torch.utils.data.sampler import Sampler
from nnmnkwii import preprocessing as P
from nnmnkwii.datasets import FileSourceDataset, FileDataSource
import librosa.display
from tensorboardX import SummaryWriter
from matplotlib import cm
from warnings import warn
from wavenet_vocoder import WaveNet
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw, is_scalar_input
from wavenet_vocoder.mixture import discretized_mix_logistic_loss
from wavenet_vocoder.mixture import sample_from_discretized_mix_logistic
from wavenet_vocoder.mixture import mix_gaussian_loss
from wavenet_vocoder.mixture import sample_from_mix_gaussian
import audio
from hparams import hparams, hparams_debug_string
global_step = 0
global_test_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
cudnn.benchmark = True
def sanity_check(model, c, g):
if model.has_speaker_embedding():
if g is None:
raise RuntimeError(
"WaveNet expects speaker embedding, but speaker-id is not provided")
else:
if g is not None:
raise RuntimeError(
"WaveNet expects no speaker embedding, but speaker-id is provided")
if model.local_conditioning_enabled():
if c is None:
raise RuntimeError("WaveNet expects conditional features, but not given")
else:
if c is not None:
raise RuntimeError("WaveNet expects no conditional features, but given")
def maybe_set_epochs_based_on_max_steps(hp, steps_per_epoch):
nepochs = hp.nepochs
max_train_steps = hp.max_train_steps
if max_train_steps is not None:
epochs = int(np.ceil(max_train_steps / steps_per_epoch))
hp.nepochs = epochs
print("info; Number of epochs is set based on max_train_steps: {}".format(epochs))
def _pad(seq, max_len, constant_values=0):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=constant_values)
def _pad_2d(x, max_len, b_pad=0, constant_values=0):
x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],
mode="constant", constant_values=constant_values)
return x
# from: https://github.com/keras-team/keras/blob/master/keras/utils/np_utils.py
# to avoid keras dependency
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
# Example
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
# TODO: I know this is too ugly...
class _NPYDataSource(FileDataSource):
def __init__(self, dump_root, col, typ="", speaker_id=None, max_steps=8000,
cin_pad=0, hop_size=256):
self.dump_root = dump_root
self.col = col
self.lengths = []
self.speaker_id = speaker_id
self.multi_speaker = False
self.speaker_ids = None
self.max_steps = max_steps
self.cin_pad = cin_pad
self.hop_size = hop_size
self.typ = typ
def collect_files(self):
meta = join(self.dump_root, "train.txt")
if not exists(meta):
paths = sorted(glob(join(self.dump_root, "*-{}.npy".format(self.typ))))
return paths
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
self.multi_speaker = len(l) == 5
self.lengths = list(
map(lambda l: int(l.decode("utf-8").split("|")[2]), lines))
paths_relative = list(map(lambda l: l.decode("utf-8").split("|")[self.col], lines))
paths = list(map(lambda f: join(self.dump_root, f), paths_relative))
        # Exclude small files (assuming lengths are in frame units)
# TODO: consider this for multi-speaker
if self.max_steps is not None:
idx = np.array(self.lengths) * self.hop_size > self.max_steps + 2 * self.cin_pad * self.hop_size
if idx.sum() != len(self.lengths):
print("{} short samples are omitted for training.".format(len(self.lengths) - idx.sum()))
self.lengths = list(np.array(self.lengths)[idx])
paths = list(np.array(paths)[idx])
if self.multi_speaker:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
self.speaker_ids = speaker_ids
if self.speaker_id is not None:
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
indices = np.array(speaker_ids) == self.speaker_id
paths = list(np.array(paths)[indices])
self.lengths = list(np.array(self.lengths)[indices])
# aha, need to cast numpy.int64 to int
self.lengths = list(map(int, self.lengths))
self.multi_speaker = False
if self.multi_speaker:
speaker_ids_np = list(np.array(self.speaker_ids)[indices])
self.speaker_ids = list(map(int, speaker_ids_np))
assert len(paths) == len(self.speaker_ids)
return paths
def collect_features(self, path):
return np.load(path)
class RawAudioDataSource(_NPYDataSource):
def __init__(self, dump_root, **kwargs):
super(RawAudioDataSource, self).__init__(dump_root, 0, "wave", **kwargs)
class MelSpecDataSource(_NPYDataSource):
def __init__(self, dump_root, **kwargs):
super(MelSpecDataSource, self).__init__(dump_root, 1, "feats", **kwargs)
class PartialyRandomizedSimilarTimeLengthSampler(Sampler):
"""Partially randomized sampler
1. Sort by lengths
2. Pick a small patch and randomize it
    3. Permute mini-batches
"""
def __init__(self, lengths, batch_size=8, batch_group_size=None):
self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))
self.batch_size = batch_size
if batch_group_size is None:
batch_group_size = min(batch_size * 8, len(self.lengths))
if batch_group_size % batch_size != 0:
batch_group_size -= batch_group_size % batch_size
self.batch_group_size = batch_group_size
assert batch_group_size % batch_size == 0
def __iter__(self):
indices = self.sorted_indices.numpy()
batch_group_size = self.batch_group_size
s, e = 0, 0
bins = []
for i in range(len(indices) // batch_group_size):
s = i * batch_group_size
e = s + batch_group_size
group = indices[s:e]
random.shuffle(group)
bins += [group]
        # Permute batches
random.shuffle(bins)
binned_idx = np.stack(bins).reshape(-1)
# Handle last elements
s += batch_group_size
if s < len(indices):
last_bin = indices[len(binned_idx):]
random.shuffle(last_bin)
binned_idx = np.concatenate([binned_idx, last_bin])
return iter(torch.tensor(binned_idx).long())
def __len__(self):
return len(self.sorted_indices)
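# Hedged usage sketch (not part of the original script): how the sampler above is
# typically handed to a DataLoader so that clips of similar length share a mini-batch.
# The random lengths below are placeholders for real per-utterance frame counts.
def _length_sampler_demo():
    lengths = [int(l) for l in np.random.randint(100, 1000, size=64)]
    sampler = PartialyRandomizedSimilarTimeLengthSampler(lengths, batch_size=8)
    return list(iter(sampler))[:8]  # indices of one roughly length-homogeneous batch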
class PyTorchDataset(object):
def __init__(self, X, Mel):
self.X = X
self.Mel = Mel
# alias
self.multi_speaker = X.file_data_source.multi_speaker
def __getitem__(self, idx):
if self.Mel is None:
mel = None
else:
mel = self.Mel[idx]
raw_audio = self.X[idx]
if self.multi_speaker:
speaker_id = self.X.file_data_source.speaker_ids[idx]
else:
speaker_id = None
# (x,c,g)
return raw_audio, mel, speaker_id
def __len__(self):
return len(self.X)
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1) \
.expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand).float()
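# Hedged example (not in the original script): sequence_mask() turns per-example lengths
# into a (B, T) float mask, e.g. lengths [2, 4] with max_len=4 yield
# [[1, 1, 0, 0], [1, 1, 1, 1]].
def _sequence_mask_demo():
    return sequence_mask(torch.LongTensor([2, 4]), max_len=4)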
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4
# https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
class ExponentialMovingAverage(object):
def __init__(self, decay):
self.decay = decay
self.shadow = {}
def register(self, name, val):
self.shadow[name] = val.clone()
def update(self, name, x):
assert name in self.shadow
update_delta = self.shadow[name] - x
self.shadow[name] -= (1.0 - self.decay) * update_delta
def clone_as_averaged_model(device, model, ema):
assert ema is not None
averaged_model = build_model().to(device)
averaged_model.load_state_dict(model.state_dict())
for name, param in averaged_model.named_parameters():
if name in ema.shadow:
param.data = ema.shadow[name].clone()
return averaged_model
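# Hedged usage sketch (not part of the original script): ExponentialMovingAverage keeps a
# "shadow" copy of every registered parameter; after each optimizer step the shadow is
# nudged toward the live weights, and clone_as_averaged_model() above copies the shadow
# values into a fresh model for evaluation or checkpointing.
def _ema_usage_sketch(model, decay=0.9999):
    ema = ExponentialMovingAverage(decay)
    for name, param in model.named_parameters():
        if param.requires_grad:
            ema.register(name, param.data)
    # ... then, after every optimizer.step():
    for name, param in model.named_parameters():
        if name in ema.shadow:
            ema.update(name, param.data)
    return ema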
class MaskedCrossEntropyLoss(nn.Module):
def __init__(self):
super(MaskedCrossEntropyLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss(reduction='none')
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, D)
mask_ = mask.expand_as(target)
losses = self.criterion(input, target)
return ((losses * mask_).sum()) / mask_.sum()
class DiscretizedMixturelogisticLoss(nn.Module):
def __init__(self):
super(DiscretizedMixturelogisticLoss, self).__init__()
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, 1)
mask_ = mask.expand_as(target)
losses = discretized_mix_logistic_loss(
input, target, num_classes=hparams.quantize_channels,
log_scale_min=hparams.log_scale_min, reduce=False)
assert losses.size() == target.size()
return ((losses * mask_).sum()) / mask_.sum()
class MixtureGaussianLoss(nn.Module):
def __init__(self):
super(MixtureGaussianLoss, self).__init__()
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, 1)
mask_ = mask.expand_as(target)
losses = mix_gaussian_loss(
input, target, log_scale_min=hparams.log_scale_min, reduce=False)
assert losses.size() == target.size()
return ((losses * mask_).sum()) / mask_.sum()
def ensure_divisible(length, divisible_by=256, lower=True):
if length % divisible_by == 0:
return length
if lower:
return length - length % divisible_by
else:
return length + (divisible_by - length % divisible_by)
def assert_ready_for_upsampling(x, c, cin_pad):
assert len(x) == (len(c) - 2 * cin_pad) * audio.get_hop_size()
def collate_fn(batch):
"""Create batch
Args:
batch(tuple): List of tuples
- x[0] (ndarray,int) : list of (T,)
- x[1] (ndarray,int) : list of (T, D)
- x[2] (ndarray,int) : list of (1,), speaker id
Returns:
tuple: Tuple of batch
- x (FloatTensor) : Network inputs (B, C, T)
- y (LongTensor) : Network targets (B, T, 1)
"""
local_conditioning = len(batch[0]) >= 2 and hparams.cin_channels > 0
global_conditioning = len(batch[0]) >= 3 and hparams.gin_channels > 0
if hparams.max_time_sec is not None:
max_time_steps = int(hparams.max_time_sec * hparams.sample_rate)
elif hparams.max_time_steps is not None:
max_time_steps = hparams.max_time_steps
else:
max_time_steps = None
# Time resolution adjustment
cin_pad = hparams.cin_pad
if local_conditioning:
new_batch = []
for idx in range(len(batch)):
x, c, g = batch[idx]
if hparams.upsample_conditional_features:
assert_ready_for_upsampling(x, c, cin_pad=0)
if max_time_steps is not None:
max_steps = ensure_divisible(max_time_steps, audio.get_hop_size(), True)
if len(x) > max_steps:
max_time_frames = max_steps // audio.get_hop_size()
s = np.random.randint(cin_pad, len(c) - max_time_frames - cin_pad)
ts = s * audio.get_hop_size()
x = x[ts:ts + audio.get_hop_size() * max_time_frames]
c = c[s - cin_pad:s + max_time_frames + cin_pad, :]
assert_ready_for_upsampling(x, c, cin_pad=cin_pad)
else:
x, c = audio.adjust_time_resolution(x, c)
if max_time_steps is not None and len(x) > max_time_steps:
s = np.random.randint(cin_pad, len(x) - max_time_steps - cin_pad)
x = x[s:s + max_time_steps]
c = c[s - cin_pad:s + max_time_steps + cin_pad, :]
assert len(x) == len(c)
new_batch.append((x, c, g))
batch = new_batch
else:
new_batch = []
for idx in range(len(batch)):
x, c, g = batch[idx]
x = audio.trim(x)
if max_time_steps is not None and len(x) > max_time_steps:
s = np.random.randint(0, len(x) - max_time_steps)
if local_conditioning:
x, c = x[s:s + max_time_steps], c[s:s + max_time_steps, :]
else:
x = x[s:s + max_time_steps]
new_batch.append((x, c, g))
batch = new_batch
# Lengths
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
# (B, T, C)
# pad for time-axis
if is_mulaw_quantize(hparams.input_type):
padding_value = P.mulaw_quantize(0, mu=hparams.quantize_channels - 1)
x_batch = np.array([_pad_2d(to_categorical(
x[0], num_classes=hparams.quantize_channels),
max_input_len, 0, padding_value) for x in batch], dtype=np.float32)
else:
x_batch = np.array([_pad_2d(x[0].reshape(-1, 1), max_input_len)
for x in batch], dtype=np.float32)
assert len(x_batch.shape) == 3
# (B, T)
if is_mulaw_quantize(hparams.input_type):
padding_value = P.mulaw_quantize(0, mu=hparams.quantize_channels - 1)
y_batch = np.array([_pad(x[0], max_input_len, constant_values=padding_value)
for x in batch], dtype=np.int)
else:
y_batch = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.float32)
assert len(y_batch.shape) == 2
# (B, T, D)
if local_conditioning:
max_len = max([len(x[1]) for x in batch])
c_batch = np.array([_pad_2d(x[1], max_len) for x in batch], dtype=np.float32)
assert len(c_batch.shape) == 3
# (B x C x T)
c_batch = torch.FloatTensor(c_batch).transpose(1, 2).contiguous()
else:
c_batch = None
if global_conditioning:
g_batch = torch.LongTensor([x[2] for x in batch])
else:
g_batch = None
    # Convert to channel first, i.e., (B, C, T)
x_batch = torch.FloatTensor(x_batch).transpose(1, 2).contiguous()
# Add extra axis
if is_mulaw_quantize(hparams.input_type):
y_batch = torch.LongTensor(y_batch).unsqueeze(-1).contiguous()
else:
y_batch = torch.FloatTensor(y_batch).unsqueeze(-1).contiguous()
input_lengths = torch.LongTensor(input_lengths)
return x_batch, y_batch, c_batch, g_batch, input_lengths
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def save_waveplot(path, y_hat, y_target):
sr = hparams.sample_rate
plt.figure(figsize=(16, 6))
plt.subplot(2, 1, 1)
librosa.display.waveplot(y_target, sr=sr)
plt.subplot(2, 1, 2)
librosa.display.waveplot(y_hat, sr=sr)
plt.tight_layout()
plt.savefig(path, format="png")
plt.close()
def eval_model(global_step, writer, device, model, y, c, g, input_lengths, eval_dir, ema=None):
if ema is not None:
print("Using averaged model for evaluation")
model = clone_as_averaged_model(device, model, ema)
model.make_generation_fast_()
model.eval()
idx = np.random.randint(0, len(y))
length = input_lengths[idx].data.cpu().item()
# (T,)
y_target = y[idx].view(-1).data.cpu().numpy()[:length]
if c is not None:
if hparams.upsample_conditional_features:
c = c[idx, :, :length // audio.get_hop_size() + hparams.cin_pad * 2].unsqueeze(0)
else:
c = c[idx, :, :length].unsqueeze(0)
assert c.dim() == 3
print("Shape of local conditioning features: {}".format(c.size()))
if g is not None:
# TODO: test
g = g[idx]
print("Shape of global conditioning features: {}".format(g.size()))
# Dummy silence
if is_mulaw_quantize(hparams.input_type):
initial_value = P.mulaw_quantize(0, hparams.quantize_channels - 1)
elif is_mulaw(hparams.input_type):
initial_value = P.mulaw(0.0, hparams.quantize_channels)
else:
initial_value = 0.0
# (C,)
if is_mulaw_quantize(hparams.input_type):
initial_input = to_categorical(
initial_value, num_classes=hparams.quantize_channels).astype(np.float32)
initial_input = torch.from_numpy(initial_input).view(
1, 1, hparams.quantize_channels)
else:
initial_input = torch.zeros(1, 1, 1).fill_(initial_value)
initial_input = initial_input.to(device)
# Run the model in fast eval mode
with torch.no_grad():
y_hat = model.incremental_forward(
initial_input, c=c, g=g, T=length, softmax=True, quantize=True, tqdm=tqdm,
log_scale_min=hparams.log_scale_min)
if is_mulaw_quantize(hparams.input_type):
y_hat = y_hat.max(1)[1].view(-1).long().cpu().data.numpy()
y_hat = P.inv_mulaw_quantize(y_hat, hparams.quantize_channels - 1)
y_target = P.inv_mulaw_quantize(y_target, hparams.quantize_channels - 1)
elif is_mulaw(hparams.input_type):
y_hat = P.inv_mulaw(y_hat.view(-1).cpu().data.numpy(), hparams.quantize_channels)
y_target = P.inv_mulaw(y_target, hparams.quantize_channels)
else:
y_hat = y_hat.view(-1).cpu().data.numpy()
# Save audio
os.makedirs(eval_dir, exist_ok=True)
path = join(eval_dir, "step{:09d}_predicted.wav".format(global_step))
librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)
path = join(eval_dir, "step{:09d}_target.wav".format(global_step))
librosa.output.write_wav(path, y_target, sr=hparams.sample_rate)
# save figure
path = join(eval_dir, "step{:09d}_waveplots.png".format(global_step))
save_waveplot(path, y_hat, y_target)
def save_states(global_step, writer, y_hat, y, input_lengths, checkpoint_dir=None):
print("Save intermediate states at step {}".format(global_step))
idx = np.random.randint(0, len(y_hat))
length = input_lengths[idx].data.cpu().item()
# (B, C, T)
if y_hat.dim() == 4:
y_hat = y_hat.squeeze(-1)
if is_mulaw_quantize(hparams.input_type):
# (B, T)
y_hat = F.softmax(y_hat, dim=1).max(1)[1]
# (T,)
y_hat = y_hat[idx].data.cpu().long().numpy()
y = y[idx].view(-1).data.cpu().long().numpy()
y_hat = P.inv_mulaw_quantize(y_hat, hparams.quantize_channels - 1)
y = P.inv_mulaw_quantize(y, hparams.quantize_channels - 1)
else:
# (B, T)
if hparams.output_distribution == "Logistic":
y_hat = sample_from_discretized_mix_logistic(
y_hat, log_scale_min=hparams.log_scale_min)
elif hparams.output_distribution == "Normal":
y_hat = sample_from_mix_gaussian(
y_hat, log_scale_min=hparams.log_scale_min)
else:
assert False
# (T,)
y_hat = y_hat[idx].view(-1).data.cpu().numpy()
y = y[idx].view(-1).data.cpu().numpy()
if is_mulaw(hparams.input_type):
y_hat = P.inv_mulaw(y_hat, hparams.quantize_channels)
y = P.inv_mulaw(y, hparams.quantize_channels)
# Mask by length
y_hat[length:] = 0
y[length:] = 0
# Save audio
audio_dir = join(checkpoint_dir, "intermediate", "audio")
os.makedirs(audio_dir, exist_ok=True)
path = join(audio_dir, "step{:09d}_predicted.wav".format(global_step))
librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)
path = join(audio_dir, "step{:09d}_target.wav".format(global_step))
librosa.output.write_wav(path, y, sr=hparams.sample_rate)
# workaround for https://github.com/pytorch/pytorch/issues/15716
# the idea is to return outputs and replicas explicitly, so that pytorch does
# not release the nodes (this is a pytorch bug though)
def data_parallel_workaround(model, input):
device_ids = list(range(torch.cuda.device_count()))
output_device = device_ids[0]
replicas = torch.nn.parallel.replicate(model, device_ids)
inputs = torch.nn.parallel.scatter(input, device_ids)
replicas = replicas[:len(inputs)]
outputs = torch.nn.parallel.parallel_apply(replicas, inputs)
y_hat = torch.nn.parallel.gather(outputs, output_device)
return y_hat, outputs, replicas
def __train_step(device, phase, epoch, global_step, global_test_step,
model, optimizer, writer, criterion,
x, y, c, g, input_lengths,
checkpoint_dir, eval_dir=None, do_eval=False, ema=None):
sanity_check(model, c, g)
# x : (B, C, T)
# y : (B, T, 1)
# c : (B, C, T)
# g : (B,)
train = (phase == "train_no_dev")
clip_thresh = hparams.clip_thresh
if train:
model.train()
step = global_step
else:
model.eval()
step = global_test_step
# Learning rate schedule
current_lr = hparams.optimizer_params["lr"]
if train and hparams.lr_schedule is not None:
lr_schedule_f = getattr(lrschedule, hparams.lr_schedule)
current_lr = lr_schedule_f(
hparams.optimizer_params["lr"], step, **hparams.lr_schedule_kwargs)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
# Prepare data
x, y = x.to(device), y.to(device)
input_lengths = input_lengths.to(device)
c = c.to(device) if c is not None else None
g = g.to(device) if g is not None else None
# (B, T, 1)
mask = sequence_mask(input_lengths, max_len=x.size(-1)).unsqueeze(-1)
mask = mask[:, 1:, :]
# Apply model: Run the model in regular eval mode
    # NOTE: softmax is handled inside the cross entropy loss (nn.CrossEntropyLoss)
# y_hat: (B x C x T)
if use_cuda:
# multi gpu support
# you must make sure that batch size % num gpu == 0
y_hat, _outputs, _replicas = data_parallel_workaround(model, (x, c, g, False))
else:
y_hat = model(x, c, g, False)
if is_mulaw_quantize(hparams.input_type):
        # we need 4d inputs for spatial cross entropy loss
# (B, C, T, 1)
y_hat = y_hat.unsqueeze(-1)
loss = criterion(y_hat[:, :, :-1, :], y[:, 1:, :], mask=mask)
else:
loss = criterion(y_hat[:, :, :-1], y[:, 1:, :], mask=mask)
if train and step > 0 and step % hparams.checkpoint_interval == 0:
save_states(step, writer, y_hat, y, input_lengths, checkpoint_dir)
save_checkpoint(device, model, optimizer, step, checkpoint_dir, epoch, ema)
if do_eval:
# NOTE: use train step (i.e., global_step) for filename
eval_model(global_step, writer, device, model, y, c, g, input_lengths, eval_dir, ema)
# Update
if train:
loss.backward()
if clip_thresh > 0:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_thresh)
optimizer.step()
# update moving average
if ema is not None:
for name, param in model.named_parameters():
if name in ema.shadow:
ema.update(name, param.data)
# Logs
writer.add_scalar("{} loss".format(phase), float(loss.item()), step)
if train:
if clip_thresh > 0:
writer.add_scalar("gradient norm", grad_norm, step)
writer.add_scalar("learning rate", current_lr, step)
return loss.item()
def train_loop(device, model, data_loaders, optimizer, writer, checkpoint_dir=None):
if is_mulaw_quantize(hparams.input_type):
criterion = MaskedCrossEntropyLoss()
else:
if hparams.output_distribution == "Logistic":
criterion = DiscretizedMixturelogisticLoss()
elif hparams.output_distribution == "Normal":
criterion = MixtureGaussianLoss()
else:
raise RuntimeError(
"Not supported output distribution type: {}".format(
hparams.output_distribution))
if hparams.exponential_moving_average:
ema = ExponentialMovingAverage(hparams.ema_decay)
for name, param in model.named_parameters():
if param.requires_grad:
ema.register(name, param.data)
else:
ema = None
global global_step, global_epoch, global_test_step
while global_epoch < hparams.nepochs:
for phase, data_loader in data_loaders.items():
train = (phase == "train_no_dev")
running_loss = 0.
test_evaluated = False
for step, (x, y, c, g, input_lengths) in tqdm(enumerate(data_loader)):
# Whether to save eval (i.e., online decoding) result
do_eval = False
eval_dir = join(checkpoint_dir, "intermediate", "{}_eval".format(phase))
# Do eval per eval_interval for train
if train and global_step > 0 \
and global_step % hparams.train_eval_interval == 0:
do_eval = True
# Do eval for test
                # NOTE: Decoding WaveNet is quite time consuming, so
                # do it only once per epoch for the test set
if not train and not test_evaluated \
and global_epoch % hparams.test_eval_epoch_interval == 0:
do_eval = True
test_evaluated = True
if do_eval:
print("[{}] Eval at train step {}".format(phase, global_step))
# Do step
running_loss += __train_step(device,
phase, global_epoch, global_step, global_test_step, model,
optimizer, writer, criterion, x, y, c, g, input_lengths,
checkpoint_dir, eval_dir, do_eval, ema)
# update global state
if train:
global_step += 1
else:
global_test_step += 1
if global_step >= hparams.max_train_steps:
print("Training reached max train steps ({}). will exit".format(hparams.max_train_steps))
return ema
# log per epoch
averaged_loss = running_loss / len(data_loader)
writer.add_scalar("{} loss (per epoch)".format(phase),
averaged_loss, global_epoch)
print("Step {} [{}] Loss: {}".format(
global_step, phase, running_loss / len(data_loader)))
global_epoch += 1
return ema
def save_checkpoint(device, model, optimizer, step, checkpoint_dir, epoch, ema=None):
checkpoint_path = join(
checkpoint_dir, "checkpoint_step{:09d}.pth".format(global_step))
optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None
global global_test_step
torch.save({
"state_dict": model.state_dict(),
"optimizer": optimizer_state,
"global_step": step,
"global_epoch": epoch,
"global_test_step": global_test_step,
}, checkpoint_path)
print("Saved checkpoint:", checkpoint_path)
import shutil
latest_pth = join(checkpoint_dir, "checkpoint_latest.pth")
shutil.copyfile(checkpoint_path, latest_pth)
if ema is not None:
averaged_model = clone_as_averaged_model(device, model, ema)
checkpoint_path = join(
checkpoint_dir, "checkpoint_step{:09d}_ema.pth".format(global_step))
torch.save({
"state_dict": averaged_model.state_dict(),
"optimizer": optimizer_state,
"global_step": step,
"global_epoch": epoch,
"global_test_step": global_test_step,
}, checkpoint_path)
print("Saved averaged checkpoint:", checkpoint_path)
latest_pth = join(checkpoint_dir, "checkpoint_latest_ema.pth")
shutil.copyfile(checkpoint_path, latest_pth)
def build_model():
if is_mulaw_quantize(hparams.input_type):
if hparams.out_channels != hparams.quantize_channels:
raise RuntimeError(
"out_channels must equal to quantize_chennels if input_type is 'mulaw-quantize'")
if hparams.upsample_conditional_features and hparams.cin_channels < 0:
s = "Upsample conv layers were specified while local conditioning disabled. "
s += "Notice that upsample conv layers will never be used."
warn(s)
upsample_params = hparams.upsample_params
upsample_params["cin_channels"] = hparams.cin_channels
upsample_params["cin_pad"] = hparams.cin_pad
model = WaveNet(
out_channels=hparams.out_channels,
layers=hparams.layers,
stacks=hparams.stacks,
residual_channels=hparams.residual_channels,
gate_channels=hparams.gate_channels,
skip_out_channels=hparams.skip_out_channels,
cin_channels=hparams.cin_channels,
gin_channels=hparams.gin_channels,
n_speakers=hparams.n_speakers,
dropout=hparams.dropout,
kernel_size=hparams.kernel_size,
cin_pad=hparams.cin_pad,
upsample_conditional_features=hparams.upsample_conditional_features,
upsample_params=upsample_params,
scalar_input=is_scalar_input(hparams.input_type),
output_distribution=hparams.output_distribution,
)
return model
def _load(checkpoint_path):
if use_cuda:
checkpoint = torch.load(checkpoint_path)
else:
checkpoint = torch.load(checkpoint_path,
map_location=lambda storage, loc: storage)
return checkpoint
def load_checkpoint(path, model, optimizer, reset_optimizer):
global global_step
global global_epoch
global global_test_step
print("Load checkpoint from: {}".format(path))
checkpoint = _load(path)
model.load_state_dict(checkpoint["state_dict"])
if not reset_optimizer:
optimizer_state = checkpoint["optimizer"]
if optimizer_state is not None:
print("Load optimizer state from {}".format(path))
optimizer.load_state_dict(checkpoint["optimizer"])
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
global_test_step = checkpoint.get("global_test_step", 0)
return model
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3
def restore_parts(path, model):
print("Restore part of the model from: {}".format(path))
state = _load(path)["state_dict"]
model_dict = model.state_dict()
valid_state_dict = {k: v for k, v in state.items() if k in model_dict}
try:
model_dict.update(valid_state_dict)
model.load_state_dict(model_dict)
except RuntimeError as e:
        # some weights probably have an invalid size, so load them one parameter at a time
print(str(e))
model_dict = model.state_dict()
for k, v in valid_state_dict.items():
model_dict[k] = v
try:
model.load_state_dict(model_dict)
except RuntimeError as e:
print(str(e))
warn("{}: may contain invalid size of weight. skipping...".format(k))
def get_data_loaders(dump_root, speaker_id, test_shuffle=True):
data_loaders = {}
local_conditioning = hparams.cin_channels > 0
if hparams.max_time_steps is not None:
max_steps = ensure_divisible(hparams.max_time_steps, audio.get_hop_size(), True)
else:
max_steps = None
for phase in ["train_no_dev", "dev"]:
train = phase == "train_no_dev"
X = FileSourceDataset(
RawAudioDataSource(join(dump_root, phase), speaker_id=speaker_id,
max_steps=max_steps, cin_pad=hparams.cin_pad,
hop_size=audio.get_hop_size()))
if local_conditioning:
Mel = FileSourceDataset(
MelSpecDataSource(join(dump_root, phase), speaker_id=speaker_id,
max_steps=max_steps, cin_pad=hparams.cin_pad,
hop_size=audio.get_hop_size()))
assert len(X) == len(Mel)
print("Local conditioning enabled. Shape of a sample: {}.".format(
Mel[0].shape))
else:
Mel = None
print("[{}]: length of the dataset is {}".format(phase, len(X)))
if train:
lengths = np.array(X.file_data_source.lengths)
# Prepare sampler
sampler = PartialyRandomizedSimilarTimeLengthSampler(
lengths, batch_size=hparams.batch_size)
shuffle = False
# make sure that there's no sorting bugs for https://github.com/r9y9/wavenet_vocoder/issues/130
sampler_idx = np.asarray(sorted(list(map(lambda s: int(s), sampler))))
assert (sampler_idx == np.arange(len(sampler_idx), dtype=np.int)).all()
else:
sampler = None
shuffle = test_shuffle
dataset = PyTorchDataset(X, Mel)
data_loader = data_utils.DataLoader(
dataset, batch_size=hparams.batch_size, drop_last=True,
num_workers=hparams.num_workers, sampler=sampler, shuffle=shuffle,
collate_fn=collate_fn, pin_memory=hparams.pin_memory)
speaker_ids = {}
if X.file_data_source.multi_speaker:
for idx, (x, c, g) in enumerate(dataset):
if g is not None:
try:
speaker_ids[g] += 1
except KeyError:
speaker_ids[g] = 1
if len(speaker_ids) > 0:
print("Speaker stats:", speaker_ids)
data_loaders[phase] = data_loader
return data_loaders
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
checkpoint_dir = args["--checkpoint-dir"]
checkpoint_path = args["--checkpoint"]
checkpoint_restore_parts = args["--restore-parts"]
speaker_id = args["--speaker-id"]
speaker_id = int(speaker_id) if speaker_id is not None else None
preset = args["--preset"]
dump_root = args["--dump-root"]
if dump_root is None:
dump_root = join(dirname(__file__), "data", "ljspeech")
log_event_path = args["--log-event-path"]
reset_optimizer = args["--reset-optimizer"]
# Load preset if specified
if preset is not None:
with open(preset) as f:
hparams.parse_json(f.read())
# Override hyper parameters
hparams.parse(args["--hparams"])
assert hparams.name == "wavenet_vocoder"
print(hparams_debug_string())
fs = hparams.sample_rate
os.makedirs(checkpoint_dir, exist_ok=True)
output_json_path = join(checkpoint_dir, "hparams.json")
with open(output_json_path, "w") as f:
json.dump(hparams.values(), f, indent=2)
# Dataloader setup
print(dump_root)
data_loaders = get_data_loaders(dump_root, speaker_id, test_shuffle=True)
maybe_set_epochs_based_on_max_steps(hparams, len(data_loaders["train_no_dev"]))
device = torch.device("cuda" if use_cuda else "cpu")
# Model
model = build_model().to(device)
receptive_field = model.receptive_field
print("Receptive field (samples / ms): {} / {}".format(
receptive_field, receptive_field / fs * 1000))
from torch import optim
Optimizer = getattr(optim, hparams.optimizer)
optimizer = Optimizer(model.parameters(), **hparams.optimizer_params)
if checkpoint_restore_parts is not None:
restore_parts(checkpoint_restore_parts, model)
# Load checkpoints
if checkpoint_path is not None:
load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)
# Setup summary writer for tensorboard
if log_event_path is None:
log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_")
print("TensorBoard event log path: {}".format(log_event_path))
writer = SummaryWriter(log_dir=log_event_path)
# Train!
ema = None
try:
ema = train_loop(device, model, data_loaders, optimizer, writer,
checkpoint_dir=checkpoint_dir)
except KeyboardInterrupt:
print("Interrupted!")
pass
finally:
save_checkpoint(
device, model, optimizer, global_step, checkpoint_dir, global_epoch, ema)
print("Finished")
sys.exit(0)
| [
"torch.cuda.is_available",
"torch.LongTensor",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.FloatTensor",
"torch.nn.parallel.scatter",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.device",
"torch.zeros",
"torch.nn.parallel.gather",
"torch.cuda.device_count",
"torch.nn.parallel.parallel_apply",
"torch.nn.functional.softmax",
"torch.nn.parallel.replicate",
"torch.arange",
"torch.no_grad",
"torch.from_numpy"
] | 0.4.1 | bnelo12/wavenet_vocoder | 68de8b8abf37fb3eec41817704f06c859925f7a5 |
1.10 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.moco import moco_loss_func
# from solo.methods.base import BaseDistillationMethod
from solo.methods.base_for_AT_dual_bn import BaseDistillationATMethodDualBN
from solo.utils.momentum import initialize_momentum_params
from solo.utils.misc import gather
from torchvision import models
from solo.utils.metrics import accuracy_at_k, weighted_mean
class MoCoV2KDATDualBN(BaseDistillationATMethodDualBN):
queue: torch.Tensor
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
temperature: float,
queue_size: int,
**kwargs
):
"""Implements MoCo V2+ (https://arxiv.org/abs/2011.10566).
Args:
proj_output_dim (int): number of dimensions of projected features.
proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
temperature (float): temperature for the softmax in the contrastive loss.
queue_size (int): number of samples to keep in the queue.
"""
super().__init__(**kwargs)
self.temperature = temperature
self.queue_size = queue_size
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
# Make StudentNetwork have same weight with Teacher
self.projector.load_state_dict(self.projector_state_dict)
# momentum projector
self.momentum_projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
# initialize_momentum_params(self.projector, self.momentum_projector)
self.momentum_projector.load_state_dict(self.projector_state_dict)
# create the queue
self.register_buffer("queue", torch.randn(2, proj_output_dim, queue_size))
self.queue = nn.functional.normalize(self.queue, dim=1)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
# To remove both projector
self.projector = nn.Identity()
self.momentum_projector = nn.Identity()
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(MoCoV2KDATDualBN, MoCoV2KDATDualBN).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("mocov2_kd_at")
# projector
parser.add_argument("--proj_output_dim", type=int, default=128)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# parameters
parser.add_argument("--temperature", type=float, default=0.1)
# queue settings
parser.add_argument("--queue_size", default=65536, type=int)
# parser.add_argument("--limit_val_batches", type=float, default=0.2)
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector parameters together with parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [{"params": self.projector.parameters()}]
return super().learnable_params + extra_learnable_params
@property
def momentum_pairs(self) -> List[Tuple[Any, Any]]:
"""Adds (projector, momentum_projector) to the parent's momentum pairs.
Returns:
List[Tuple[Any, Any]]: list of momentum pairs.
"""
extra_momentum_pairs = [(self.projector, self.momentum_projector)]
return super().momentum_pairs + extra_momentum_pairs
@torch.no_grad()
def _dequeue_and_enqueue(self, keys: torch.Tensor):
"""Adds new samples and removes old samples from the queue in a fifo manner.
Args:
keys (torch.Tensor): output features of the momentum backbone.
"""
batch_size = keys.shape[1]
ptr = int(self.queue_ptr) # type: ignore
assert self.queue_size % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
keys = keys.permute(0, 2, 1)
self.queue[:, :, ptr : ptr + batch_size] = keys
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr # type: ignore
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the online backbone and projector.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
"""
out = super().forward(X, *args, **kwargs)
z = F.normalize(self.projector(out["feats"]), dim=-1)
return {**out, "z": z}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""
Training step for MoCo reusing BaseMomentumMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the
format of [img_indexes, [X], Y], where [X] is a list of size self.num_large_crops
containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of MOCO loss and classification loss.
"""
self.momentum_backbone.eval()
# import ipdb; ipdb.set_trace()
image_tau1, image_weak = batch[1]
targets = batch[2]
############################################################################
# Adversarial Training (DUAL BN)
############################################################################
away_target = self.momentum_projector(self.momentum_backbone(image_weak))
AE_generation_image = image_weak
image_AE = self.generate_training_AE(AE_generation_image, away_target)
        student_logits_clean = self.backbone(image_weak, "normal")
        student_logits_AE = self.backbone(image_AE, "pgd")
        adv_loss = -F.cosine_similarity(student_logits_AE, away_target).mean()
        adv_loss += -3 * F.cosine_similarity(student_logits_AE, student_logits_clean).mean()
############################################################################
# Adversarial Training (DUAL BN)
############################################################################
############################################################################
# Adversarial Training (CAT)
############################################################################
# away_target = self.momentum_projector(self.momentum_backbone(image_weak))
# AE_generation_image = image_weak
# image_AE = self.generate_training_AE(AE_generation_image, away_target)
# image_CAT = torch.cat([image_weak, image_AE])
# logits_all = self.backbone(image_CAT)
# bs = image_weak.size(0)
        # student_logits_clean = logits_all[:bs]
        # student_logits_AE = logits_all[bs:]
        # adv_loss = -F.cosine_similarity(student_logits_AE, away_target).mean()
        # adv_loss += -3 * F.cosine_similarity(student_logits_AE, student_logits_clean).mean()
############################################################################
# Adversarial Training (CAT)
############################################################################
############################################################################
# Adversarial Training (CODE Experiment)
############################################################################
        # # NOTE: Check whether to make the Student start with the same weights as the Teacher
# # CODE Definition. 0: Weak augmentation 1: Tau1 augmentation
# # CS_Teacher$1_Student-AE_CS_Teacher$2_Student$3, so case 1 is 110
# # 1. CS_TeacherTau1_Student-AE_CS_TeacherTau1_StudentWeak
# # 110
# # with torch.no_grad():
# # logits_TeacherTau1 = self.projector(self.momentum_backbone(image_tau1))
# # image_AE = self.generate_training_AE(image_weak, logits_TeacherTau1)
# # student_logit = self.projector(self.backbone(image_AE))
# # adv_loss = -F.cosine_similarity(student_logit, logits_TeacherTau1).mean()
# # 2. CS_TeacherTau1_Student-AE_CS_TeacherTau1_StudentTau1
# # 111
# # with torch.no_grad():
# # logits_TeacherTau1 = self.projector(self.momentum_backbone(image_tau1))
# # image_AE = self.generate_training_AE(image_tau1, logits_TeacherTau1)
# # student_logit = self.projector(self.backbone(image_AE))
# # adv_loss = -F.cosine_similarity(student_logit, logits_TeacherTau1).mean()
# # CODE Based experiment
# # 110
# # 111
# # experiment_code = [1, 0, 1]
# # experiment_code = [1, 0, 0]
# experiment_code = [0, 0, 0]
# # experiment_code = [0, 1, 1]
# # experiment_code = [0, 0, 1]
# # experiment_code = [0, 1, 0]
# with torch.no_grad():
# # away target
# if experiment_code[1] == 0:
# # away_target = self.projector(self.momentum_backbone(image_weak))
# away_target = self.momentum_projector(self.momentum_backbone(image_weak))
# elif experiment_code[1] == 1:
# # away_target = self.projector(self.momentum_backbone(image_tau1))
# away_target = self.momentum_projector(self.momentum_backbone(image_tau1))
# # Loss learning target
# if experiment_code[0] == experiment_code[1]:
# learning_target = away_target
# elif experiment_code[0] == 0:
# # learning_target = self.projector(self.momentum_backbone(image_weak))
# learning_target = self.momentum_projector(self.momentum_backbone(image_weak))
# elif experiment_code[0] == 1:
# # learning_target = self.projector(self.momentum_backbone(image_tau1))
# learning_target = self.momentum_projector(self.momentum_backbone(image_tau1))
# if experiment_code[2] == 0:
# AE_generation_image = image_weak
# elif experiment_code[2] == 1:
# AE_generation_image = image_tau1
# image_AE = self.generate_training_AE(AE_generation_image, away_target)
# student_logit = self.projector(self.backbone(image_AE))
# adv_loss = -F.cosine_similarity(student_logit, learning_target).mean()
############################################################################
# Adversarial Training (CODE Experiment)
############################################################################
############################################################################
# Online clean classifier training
############################################################################
# Bug Fix: train classifier using evaluation mode
self.backbone.eval()
outs_image_weak = self._base_shared_step(image_weak, targets, bn_name="pgd")
self.backbone.train()
metrics = {
"train_class_loss": outs_image_weak["loss"],
"train_acc1": outs_image_weak["acc1"],
"train_acc5": outs_image_weak["acc5"],
}
class_loss_clean = outs_image_weak["loss"]
self.log_dict(metrics, on_epoch=True)
############################################################################
# Online clean classifier training
############################################################################
############################################################################
# Online adv classifier training
############################################################################
# Bug Fix: train classifier using evaluation mode
# logits = self.classifier_adv(student_logits_AE)
self.backbone.eval()
AE_feats = self.backbone(image_AE, "pgd")
logits = self.classifier_adv(AE_feats.detach())
self.backbone.train()
class_loss_adv = F.cross_entropy(logits, targets, ignore_index=-1)
# handle when the number of classes is smaller than 5
top_k_max = min(5, logits.size(1))
acc1, acc5 = accuracy_at_k(logits, targets, top_k=(1, top_k_max))
metrics = {
"train_class_loss_adv_classifier": class_loss_adv,
"train_acc1_adv_classifier": acc1,
"train_acc5_adv_classifier": acc5,
}
class_loss_clean = outs_image_weak["loss"]
self.log_dict(metrics, on_epoch=True)
############################################################################
# Online adv classifier training
############################################################################
self.log("adv_loss", adv_loss, on_epoch=True, sync_dist=True)
return adv_loss + class_loss_adv + class_loss_clean
def generate_training_AE(self, image: torch.Tensor, away_target: torch.Tensor):
"""
        image: weakly augmented input batch
        away_target: teacher feature that the adversarial example is pushed away from
"""
self.epsilon = 8/255.
self.num_steps = 5
self.step_size = 2/255.
x_cl = image.clone().detach()
# if self.rand:
x_cl = x_cl + torch.zeros_like(image).uniform_(-self.epsilon, self.epsilon)
# f_ori_proj = self.model(images_org).detach()
# Change the attack process of model to eval
self.backbone.eval()
for i in range(self.num_steps):
x_cl.requires_grad_()
with torch.enable_grad():
f_proj = self.projector(self.backbone(x_cl, "pgd"))
# loss_contrast = -F.cosine_similarity(f_proj, f_ori_proj, dim=1).mean()
loss_contrast = -F.cosine_similarity(f_proj, away_target, dim=1).sum() *256
loss = loss_contrast
# import ipdb ;ipdb.set_trace()
grad_x_cl = torch.autograd.grad(loss, x_cl)[0]
# grad_x_cl = torch.autograd.grad(loss, x_cl, grad_outputs=torch.ones_like(loss))[0]
x_cl = x_cl.detach() + self.step_size * torch.sign(grad_x_cl.detach())
x_cl = torch.min(torch.max(x_cl, image - self.epsilon), image + self.epsilon)
x_cl = torch.clamp(x_cl, 0, 1)
self.backbone.train()
return x_cl
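def _pgd_step_sketch(x, away_target, feature_fn, epsilon=8 / 255., step_size=2 / 255.):
    # Illustrative sketch (not part of the original class): a single step of the
    # same PGD-style attack used in generate_training_AE above, written against a
    # generic feature extractor `feature_fn`. The extractor and default step sizes
    # are assumptions made for the demo, not the repository's training setup.
    x_adv = (x + torch.zeros_like(x).uniform_(-epsilon, epsilon)).detach().requires_grad_()
    with torch.enable_grad():
        loss = -F.cosine_similarity(feature_fn(x_adv), away_target, dim=1).sum()
    grad = torch.autograd.grad(loss, x_adv)[0]
    x_adv = x_adv.detach() + step_size * torch.sign(grad)
    x_adv = torch.min(torch.max(x_adv, x - epsilon), x + epsilon)
    return torch.clamp(x_adv, 0, 1)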
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.Identity",
"torch.zeros",
"torch.max",
"torch.no_grad",
"torch.enable_grad",
"torch.clamp",
"torch.nn.ReLU",
"torch.autograd.grad",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.cosine_similarity",
"torch.zeros_like",
"torch.randn"
] | 1.10.0 | pantheon5100/DeACL | 32cf8182f2ef271fab7453bc5cc1ddea6dfa3c22 |
1.9 | import torch
import torch.nn as nn
import argparse
import numpy as np
from torch.utils.data import DataLoader , Dataset
import pandas as pd
from tqdm import tqdm
from transformers import (
BertTokenizer,
AdamW ,
get_linear_schedule_with_warmup ,
T5Tokenizer,
T5ForConditionalGeneration)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def parse_args():
parser = argparse.ArgumentParser(description="Pretrained Machine Translation French to Wolof")
parser.add_argument(
"--train_file", type=str, default=None, help="A csv file containing the training data."
)
parser.add_argument(
"--max_source_length",
type=int,
default=150,
help="The maximum total input sequence length after "
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
type=int,
default=150,
help="The maximum total sequence length for target text after "
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
)
parser.add_argument(
"--number_epochs",
type=int,
default=3,
help="Total number of training steps to perform the model .",
)
parser.add_argument(
"--learning_rate",
type=float,
default=3e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--epsilone",
type=float,
default=1e-8,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--train_batch_size",
type=int,
default=1,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--model_name",
type=str,
default="t5-base",
help="Pretrained model name.",
)
parser.add_argument(
"--task_prefix",
type=str,
default="translate French to Wolof: ",
help="The task prefix for the translation.",
)
args = parser.parse_args()
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
return args
class NMTDataset(Dataset):
"""
Dataset Class
"""
def __init__(self, frenchs, wolofs , tokenizer , max_len_source , max_len_target):
"""
        Parameters
        ----------
        frenchs : source sentences in French
        wolofs : target sentences in Wolof
        tokenizer: tokenizer used to encode both source and target texts
        max_len_source: maximum number of source tokens
        max_len_target: maximum number of target tokens
"""
self.frenchs = frenchs
self.wolofs = wolofs
self.tokenizer = tokenizer
self.max_len_source = max_len_source
self.max_len_target = max_len_target
def __len__(self):
return len(self.frenchs)
def __getitem__(self, item):
french = str(self.frenchs[item])
wolof = str(self.wolofs[item])
french_encoding = self.tokenizer(
french,
add_special_tokens=True,
max_length=self.max_len_source,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt')
labels = self.tokenizer(
wolof,
add_special_tokens=True,
max_length=self.max_len_target,
return_token_type_ids=True,
pad_to_max_length=True,
return_attention_mask=True
).input_ids
labels = [
[(label if label != self.tokenizer.pad_token_id else -100) for label in labels]]
labels = torch.tensor(labels)
return {
'input_ids': french_encoding['input_ids'].flatten(),
'attention_mask':french_encoding['attention_mask'].flatten(),
'labels': labels.flatten()
}
def NMTDataloader(df , batch_size , tokenizer , max_len_source , max_len_target):
dataset = NMTDataset(df.french.values , df.wolof.values , tokenizer , max_len_source , max_len_target)
dataloader = DataLoader(dataset , batch_size , num_workers= 4)
return dataloader
def yield_optimizer(model):
"""
Returns optimizer for specific parameters
"""
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
return AdamW(optimizer_parameters, lr=3e-5, eps=1e-8)
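def _demo_param_groups():
    # Illustrative sketch (not part of the original script): shows how the
    # no_decay name filter used in yield_optimizer() splits a toy module's
    # parameters into weight-decayed and non-decayed groups. nn.Linear is
    # only an example module for the demo.
    layer = nn.Linear(4, 2)
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    decayed = [n for n, p in layer.named_parameters() if not any(nd in n for nd in no_decay)]
    not_decayed = [n for n, p in layer.named_parameters() if any(nd in n for nd in no_decay)]
    return decayed, not_decayed  # (['weight'], ['bias'])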
def train_epoch (model , data_loader, optimizer , device , scheduler):
model.train()
losses = []
for step , d in tqdm(enumerate(data_loader) , total=len(data_loader)):
input_ids =d['input_ids'].to(device)
attention_mask = d['attention_mask'].to(device)
labels = d['labels'].to(device)
outputs = model(input_ids=input_ids , attention_mask=attention_mask , labels=labels)
loss = outputs.loss
losses.append(loss.item())
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % 10 == 0:
            print('Step: {} | loss: {} '.format(step + 1, np.mean(losses)))
def train():
args = parse_args()
tokenizer = T5Tokenizer.from_pretrained(args.model_name)
model= T5ForConditionalGeneration.from_pretrained(args.model_name)
model.to(device)
df = pd.read_csv(args.train_file)
df["french"]=df["french"].apply(lambda x:args.task_prefix +x)
train_data_loader= NMTDataloader(df,args.train_batch_size , tokenizer , args.max_source_length , args.max_target_length)
    nb_train_steps = int(len(train_data_loader) * args.number_epochs)  # len(dataloader) is already the number of batches per epoch
optimizer = yield_optimizer(model)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=nb_train_steps)
for epoch in range(args.number_epochs):
print(f'Epoch {epoch + 1}')
train_epoch(model,train_data_loader,optimizer,device,scheduler)
return model.save_pretrained("../model/bert2bert")
if __name__ == '__main__':
train() | [
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.9.0 | abdouaziz/wolof-translation | 505324f8a7c5a91a42e2c775495fc3bdebc8f761 |
1.7 | # MIT License
# Copyright (c) 2019 Sebastian Penhouet
# GitHub project: https://github.com/Spenhouet/tensorboard-aggregator
# ==============================================================================
"""Aggregates multiple tensorbaord runs"""
# python scripts/aggregator.py -r result/CAModel-all-4e-nict-coref-ocz-noun
import argparse
import os
from pathlib import Path
from typing import List, Tuple, Dict
from functools import reduce
import numpy as np
from ordered_set import OrderedSet
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from torch.utils.tensorboard import FileWriter
from torch.utils.tensorboard.summary import Summary
from torch.utils.tensorboard.writer import Event
def extract(paths: List[Path]) -> Dict[str, tuple]:
event_accumulators: List[EventAccumulator] = [EventAccumulator(str(path)).Reload() for path in paths]
all_scalar_tags: List[OrderedSet[str]] = [OrderedSet(accum.Tags()['scalars']) for accum in event_accumulators]
tags: OrderedSet[str] = reduce(lambda x, y: x & y, all_scalar_tags)
return {tag: _extract_tag(event_accumulators, tag) for tag in tags}
def _extract_tag(event_accumulators: List[EventAccumulator],
tag: str
) -> Tuple[List[float], tuple, List[List[float]]]:
all_scalar_events: List[List[Event]] = [accum.Scalars(tag) for accum in event_accumulators]
wall_times: List[float] = list(np.mean([[event.wall_time for event in events] for events in all_scalar_events],
axis=0))
all_steps: List[tuple] = [tuple(event.step for event in events) for events in all_scalar_events]
assert len(set(all_steps)) == 1, \
'For scalar {} the step numbering or count doesn\'t match. Step count for all runs: {}'.format(
tag, [len(steps) for steps in all_steps])
steps: tuple = all_steps[0]
all_values: List[List[float]] = [[event.value for event in events] for events in all_scalar_events]
return wall_times, steps, all_values
def write_summary(base_dir: Path, aggregations_per_tag) -> None:
# remove existing files
for path in base_dir.glob('events.out.tfevents.*'):
os.remove(str(path))
writer = FileWriter(base_dir)
for tag, (steps, wall_times, aggregations) in aggregations_per_tag.items():
for wall_time, step, aggregation in zip(steps, wall_times, aggregations):
summary = Summary(value=[Summary.Value(tag=tag, simple_value=aggregation)])
scalar_event = Event(wall_time=wall_time, step=step, summary=summary)
writer.add_event(scalar_event)
writer.flush()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--result', '-r', type=str, required=True, help='main path for tensorboard files')
parser.add_argument('--aggr-name', type=str, default='aggregates',
help='name of directory where aggregated summaries are output')
parser.add_argument('--operations', '--ops', choices=['mean', 'min', 'max', 'median', 'std', 'var'],
default=['mean', 'min', 'max'], nargs='*',
help='operations to aggregate summaries')
args = parser.parse_args()
base_dir = Path(args.result)
if not base_dir.exists():
raise argparse.ArgumentTypeError(f'Parameter {base_dir} is not a valid path')
tfevents_paths = list(base_dir.glob('*/events.out.tfevents.*'))
if not tfevents_paths:
raise ValueError(f'No tfevents file found in {base_dir}/*/')
print(f'Started aggregation {base_dir.name}')
extracts = extract(tfevents_paths)
for op_name in args.operations:
op = getattr(np, op_name)
summary_dir = base_dir / args.aggr_name / op_name
aggregations_per_tag = {tag: (steps, wall_times, op(values, axis=0))
for tag, (steps, wall_times, values) in extracts.items()}
write_summary(summary_dir, aggregations_per_tag)
print(f'Finished aggregation {base_dir.name}')
if __name__ == '__main__':
main()
| [
"torch.utils.tensorboard.summary.Summary.Value",
"torch.utils.tensorboard.FileWriter",
"torch.utils.tensorboard.writer.Event"
] | 1.7.1 | nobu-g/cohesion-analysis | bf2e22c1aff51f96fd2aaef6359839646548c3be |
1.4 | import os
import sys
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from neat_eo.core import load_config, check_classes, check_channels
from neat_eo.tiles import tiles_from_dir, tile_label_from_file, tiles_from_csv
def add_parser(subparser, formatter_class):
parser = subparser.add_parser("dataset", help="train dataset helper", formatter_class=formatter_class)
parser.add_argument("--config", type=str, help="path to config file [required, if no global config setting]")
parser.add_argument("--dataset", type=str, required=True, help="dataset path [required]")
parser.add_argument("--cover", type=str, help="path to csv tiles cover file, to filter tiles dataset on [optional]")
parser.add_argument("--workers", type=int, help="number of workers [default: CPU]")
choices = ["check", "weights"]
parser.add_argument("--mode", type=str, default="check", choices=choices, help="dataset mode [default: check]")
parser.set_defaults(func=main)
class LabelsDataset(torch.utils.data.Dataset):
def __init__(self, root, num_classes, cover=None):
super().__init__()
self.num_classes = num_classes
self.tiles = [path for tile, path in tiles_from_dir(os.path.join(root, "labels"), cover=cover, xyz_path=True)]
assert len(self.tiles), "Empty Dataset"
def __len__(self):
return len(self.tiles)
def __getitem__(self, i):
mask = torch.from_numpy(tile_label_from_file(self.tiles[i]))
return torch.bincount(mask.view(-1), minlength=self.num_classes), mask.nelement()
def compute_classes_weights(dataset, classes, cover, workers):
label_dataset = LabelsDataset(dataset, len(classes), cover)
loader = DataLoader(label_dataset, batch_size=workers, num_workers=workers)
n_classes = np.zeros(len(classes))
n_pixels = 0
for c, n in tqdm(loader, desc="Classes Weights", unit="batch", ascii=True):
n_classes += c.data.numpy()[0]
n_pixels += int(n.data.numpy()[0])
weights = 1 / np.log(1.02 + (n_classes / n_pixels)) # cf https://arxiv.org/pdf/1606.02147.pdf
return weights.round(3, out=weights).tolist()
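def _demo_enet_weighting():
    # Illustrative sketch (not part of the original module): the ENet-style
    # weighting 1 / log(1.02 + class_frequency) used in compute_classes_weights(),
    # applied to a toy pixel distribution. The counts are made up for the example.
    pixel_counts = np.array([900.0, 90.0, 10.0])
    freq = pixel_counts / pixel_counts.sum()
    return (1 / np.log(1.02 + freq)).round(3)  # rarer classes get larger weights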
def main(args):
assert os.path.isdir(os.path.expanduser(args.dataset)), "--dataset path is not a directory"
args.cover = [tile for tile in tiles_from_csv(os.path.expanduser(args.cover))] if args.cover else None
config = load_config(args.config)
if not args.workers:
args.workers = os.cpu_count()
print("neo dataset {} on CPU, with {} workers".format(args.mode, args.workers), file=sys.stderr, flush=True)
if args.mode == "check":
check_classes(config)
check_channels(config)
# TODO check dataset
if args.mode == "weights":
check_classes(config)
weights = compute_classes_weights(args.dataset, config["classes"], args.cover, args.workers)
print(",".join(map(str, weights)))
| [
"torch.utils.data.DataLoader"
] | 1.4.0 | hzitoun/neat-EO | 3519f1b2a5b4eb6b1b8ec38bce0e722efb61a94b |
1.3 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch_geometric.nn import GCNConv, GATConv
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot, uniform
from torch_geometric.utils import softmax
import math
class HGTConv(MessagePassing):
def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout = 0.2, **kwargs):
super(HGTConv, self).__init__(aggr='add', **kwargs)
self.in_dim = in_dim
self.out_dim = out_dim
self.num_types = num_types
self.num_relations = num_relations
self.total_rel = num_types * num_relations * num_types
self.n_heads = n_heads
self.d_k = out_dim // n_heads
self.sqrt_dk = math.sqrt(self.d_k)
self.att = None
self.k_linears = nn.ModuleList()
self.q_linears = nn.ModuleList()
self.v_linears = nn.ModuleList()
self.a_linears = nn.ModuleList()
for t in range(num_types):
self.k_linears.append(nn.Linear(in_dim, out_dim))
self.q_linears.append(nn.Linear(in_dim, out_dim))
self.v_linears.append(nn.Linear(in_dim, out_dim))
self.a_linears.append(nn.Linear(out_dim, out_dim))
'''
        TODO: make relation_pri smaller, as not all <st, rt, tt> pairs exist in the meta relation list.
'''
self.relation_pri = nn.Parameter(torch.ones(num_types, num_relations, num_types, self.n_heads))
self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.skip = nn.Parameter(torch.ones(num_types))
self.drop = nn.Dropout(dropout)
self.emb = RelTemporalEncoding(in_dim)
glorot(self.relation_att)
glorot(self.relation_msg)
def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):
return self.propagate(edge_index, node_inp=node_inp, node_type=node_type, \
edge_type=edge_type, edge_time = edge_time)
def message(self, edge_index_i, node_inp_i, node_inp_j, node_type_i, node_type_j, edge_type, edge_time):
'''
j: source, i: target; <j, i>
'''
data_size = edge_index_i.size(0)
'''
Create Attention and Message tensor beforehand.
'''
res_att = torch.zeros(data_size, self.n_heads).to(node_inp_i.device)
res_msg = torch.zeros(data_size, self.n_heads, self.d_k).to(node_inp_i.device)
for source_type in range(self.num_types):
sb = (node_type_j == int(source_type))
k_linear = self.k_linears[source_type]
v_linear = self.v_linears[source_type]
for target_type in range(self.num_types):
tb = (node_type_i == int(target_type)) & sb
q_linear = self.q_linears[target_type]
for relation_type in range(self.num_relations):
'''
idx is all the edges with meta relation <source_type, relation_type, target_type>
'''
idx = (edge_type == int(relation_type)) & tb
if idx.sum() == 0:
continue
'''
Get the corresponding input node representations by idx.
Add tempotal encoding to source representation (j)
'''
target_node_vec = node_inp_i[idx]
source_node_vec = self.emb(node_inp_j[idx], edge_time[idx])
'''
Step 1: Heterogeneous Mutual Attention
'''
q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = torch.bmm(k_mat.transpose(1,0), self.relation_att[relation_type]).transpose(1,0)
res_att[idx] = (q_mat * k_mat).sum(dim=-1) * \
self.relation_pri[target_type][relation_type][source_type] / self.sqrt_dk
'''
Step 2: Heterogeneous Message Passing
'''
v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
res_msg[idx] = torch.bmm(v_mat.transpose(1,0), self.relation_msg[relation_type]).transpose(1,0)
'''
Softmax based on target node's id (edge_index_i). Store attention value in self.att for later visualization.
'''
self.att = softmax(res_att, edge_index_i)
res = res_msg * self.att.view(-1, self.n_heads, 1)
del res_att, res_msg
return res.view(-1, self.out_dim)
def update(self, aggr_out, node_inp, node_type):
'''
Step 3: Target-specific Aggregation
x = W[node_type] * gelu(Agg(x)) + x
'''
aggr_out = F.gelu(aggr_out)
res = torch.zeros(aggr_out.size(0), self.out_dim).to(node_inp.device)
for target_type in range(self.num_types):
idx = (node_type == int(target_type))
if idx.sum() == 0:
continue
'''
Add skip connection with learnable weight self.skip[t_id]
'''
alpha = F.sigmoid(self.skip[target_type])
res[idx] = self.a_linears[target_type](aggr_out[idx]) * alpha + node_inp[idx] * (1 - alpha)
return self.drop(res)
def __repr__(self):
        return '{}(in_dim={}, out_dim={}, num_types={}, num_relations={})'.format(
self.__class__.__name__, self.in_dim, self.out_dim,
self.num_types, self.num_relations)
class RelTemporalEncoding(nn.Module):
'''
Implement the Temporal Encoding (Sinusoid) function.
'''
def __init__(self, n_hid, max_len = 240, dropout = 0.2):
super(RelTemporalEncoding, self).__init__()
self.drop = nn.Dropout(dropout)
position = torch.arange(0., max_len).unsqueeze(1)
div_term = 1 / (10000 ** (torch.arange(0., n_hid * 2, 2.)) / n_hid / 2)
self.emb = nn.Embedding(max_len, n_hid * 2)
self.emb.weight.data[:, 0::2] = torch.sin(position * div_term) / math.sqrt(n_hid)
self.emb.weight.data[:, 1::2] = torch.cos(position * div_term) / math.sqrt(n_hid)
self.emb.requires_grad = False
self.lin = nn.Linear(n_hid * 2, n_hid)
def forward(self, x, t):
return x + self.lin(self.drop(self.emb(t)))
class GeneralConv(nn.Module):
def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout):
super(GeneralConv, self).__init__()
self.conv_name = conv_name
if self.conv_name == 'hgt':
self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout)
elif self.conv_name == 'gcn':
self.base_conv = GCNConv(in_hid, out_hid)
elif self.conv_name == 'gat':
self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)
def forward(self, meta_xs, node_type, edge_index, edge_type, edge_time):
if self.conv_name == 'hgt':
return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)
elif self.conv_name == 'gcn':
return self.base_conv(meta_xs, edge_index)
elif self.conv_name == 'gat':
return self.base_conv(meta_xs, edge_index)
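def _demo_hgt_conv():
    # Illustrative sketch (not part of the original module): one HGTConv pass on a
    # tiny heterogeneous graph. All sizes, index values, and time gaps below are
    # assumptions made up for the demo, not taken from the pyHGT training setup.
    num_nodes, dim = 6, 16
    node_inp = torch.randn(num_nodes, dim)
    node_type = torch.tensor([0, 0, 1, 1, 1, 0])             # two node types
    edge_index = torch.tensor([[0, 2, 3, 5], [1, 0, 4, 2]])  # row 0: source, row 1: target
    edge_type = torch.tensor([0, 1, 0, 1])                   # two relation types
    edge_time = torch.tensor([0, 3, 5, 7])                   # relative time gaps
    conv = HGTConv(dim, dim, num_types=2, num_relations=2, n_heads=4)
    return conv(node_inp, node_type, edge_index, edge_type, edge_time)  # shape [6, 16]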
| [
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.Dropout",
"torch.cos",
"torch.zeros",
"torch.nn.ModuleList",
"torch.sin",
"torch.arange",
"torch.nn.functional.gelu",
"torch.ones",
"torch.Tensor",
"torch.nn.Embedding"
] | 1.3.0 | zheng-da/pyHGT | b654495053c82edcc8a7e1e00b7873ac93e6e59d |
1.0 | import numpy as np
import time
import torch
import torch.nn as nn
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
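def _demo_do_mixup():
    # Illustrative sketch (not part of the original module): checks the pairing
    # convention of do_mixup() on a toy batch; the values are made up for the demo.
    x = torch.arange(8.).view(4, 2)           # 4 samples (2 mixup pairs), 2 features
    lam = torch.tensor([0.3, 0.7, 0.5, 0.5])  # per-sample lambdas for even/odd indexes
    out = do_mixup(x, lam)                    # rows 0/1 and 2/3 are mixed together
    assert out.shape == (2, 2)
    return out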
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def forward(model, generator, return_input=False,
return_target=False):
"""Forward data to a model.
Args:
model: object
generator: object
return_input: bool
return_target: bool
Returns:
audio_name: (audios_num,)
clipwise_output: (audios_num, classes_num)
      (if exists) segmentwise_output: (audios_num, segments_num, classes_num)
      (if exists) framewise_output: (audios_num, frames_num, classes_num)
(optional) return_input: (audios_num, segment_samples)
(optional) return_target: (audios_num, classes_num)
"""
output_dict = {}
device = next(model.parameters()).device
time1 = time.time()
# Forward data to a model in mini-batches
for n, batch_data_dict in enumerate(generator):
print(n)
batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
with torch.no_grad():
model.eval()
batch_output = model(batch_waveform)
append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
append_to_dict(output_dict, 'clipwise_output',
batch_output['clipwise_output'].data.cpu().numpy())
if 'segmentwise_output' in batch_output.keys():
append_to_dict(output_dict, 'segmentwise_output',
batch_output['segmentwise_output'].data.cpu().numpy())
if 'framewise_output' in batch_output.keys():
append_to_dict(output_dict, 'framewise_output',
batch_output['framewise_output'].data.cpu().numpy())
if return_input:
append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
if return_target:
if 'target' in batch_data_dict.keys():
append_to_dict(output_dict, 'target', batch_data_dict['target'])
if n % 10 == 0:
print(' --- Inference time: {:.3f} s / 10 iterations ---'.format(
time.time() - time1))
time1 = time.time()
for key in output_dict.keys():
output_dict[key] = np.concatenate(output_dict[key], axis=0)
return output_dict
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
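def _demo_interpolate():
    # Illustrative sketch (not part of the original module): interpolate() repeats
    # every time step `ratio` times, e.g. (2, 4, 3) -> (2, 12, 3) for ratio=3.
    # The shapes are made up for the demo.
    x = torch.rand(2, 4, 3)
    upsampled = interpolate(x, ratio=3)
    assert upsampled.shape == (2, 12, 3)
    return upsampled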
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
if frames_num == framewise_output.shape[1]:
return framewise_output
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_flops(model, audio_length):
"""Count flops. Code modified from others' implementation.
"""
multiply_adds = True
list_conv2d=[]
def conv2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv2d.append(flops)
list_conv1d=[]
def conv1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_conv1d.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement() * 2)
list_pooling2d=[]
def pooling2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling2d.append(flops)
list_pooling1d=[]
def pooling1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0]
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_pooling2d.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, nn.Conv2d):
net.register_forward_hook(conv2d_hook)
elif isinstance(net, nn.Conv1d):
net.register_forward_hook(conv1d_hook)
elif isinstance(net, nn.Linear):
net.register_forward_hook(linear_hook)
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
net.register_forward_hook(bn_hook)
elif isinstance(net, nn.ReLU):
net.register_forward_hook(relu_hook)
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
net.register_forward_hook(pooling2d_hook)
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
net.register_forward_hook(pooling1d_hook)
else:
print('Warning: flop of module {} is not counted!'.format(net))
return
for c in childrens:
foo(c)
# Register hook
foo(model)
    device = next(model.parameters()).device
input = torch.rand(1, audio_length).to(device)
out = model(input)
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
return total_flops
| [
"torch.rand",
"torch.cat",
"torch.no_grad",
"torch.LongTensor",
"torch.Tensor"
] | 1.0.1 | tontsam/audioset_tagging_cnn | 223f0a92fb753a34ac145a64c6713ee497fbda0c |
1.4 | import pytest
import torch
from torchts.nn.loss import masked_mae_loss, mis_loss, quantile_loss
@pytest.fixture
def y_true():
data = [1, 2, 3]
return torch.tensor(data)
@pytest.fixture
def y_pred():
data = [1.1, 1.9, 3.1]
return torch.tensor(data)
def test_masked_mae_loss(y_true, y_pred):
"""Test masked_mae_loss()"""
loss = masked_mae_loss(y_pred, y_true)
assert loss == pytest.approx(0.1)
@pytest.mark.parametrize(
"lower, upper, interval, expected_loss",
[
([1, 2, 3], [1.1, 2.1, 3.1], 0.8, 0.1),
([0.9, 1.9, 2.9], [1.1, 2.1, 3.1], 0.8, 0.2),
([0.9, 1.9, 2.9], [1.1, 2.1, 3.1], 0.95, 0.2),
([0.7, 1.9, 2.9], [0.9, 2.1, 3.1], 0.8, 1.6 / 3),
([0.7, 1.9, 2.9], [0.9, 2.1, 3.1], 0.95, 4.6 / 3),
([0.9, 1.9, 3.1], [1.1, 2.1, 3.3], 0.8, 1.6 / 3),
([0.9, 1.9, 3.1], [1.1, 2.1, 3.3], 0.95, 4.6 / 3),
],
)
def test_mis_loss(y_true, lower, upper, interval, expected_loss):
"""Test quantile_loss()"""
y_true = y_true.reshape(-1, 1)
y_pred = torch.transpose(torch.tensor([lower, upper]), 0, 1)
loss = mis_loss(y_pred, y_true, interval)
assert loss == pytest.approx(expected_loss)
@pytest.mark.parametrize(
"quantile, expected_loss", [(0.05, 0.065), (0.5, 0.05), (0.95, 0.035)]
)
def test_quantile_loss(y_true, y_pred, quantile, expected_loss):
"""Test quantile_loss()"""
loss = quantile_loss(y_pred, y_true, quantile)
assert loss == pytest.approx(expected_loss)
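def _demo_pinball_arithmetic():
    # Illustrative sketch (not part of the original tests): the q=0.95 case above
    # worked by hand. With errors e = y_true - y_pred = [-0.1, 0.1, -0.1], the
    # per-element pinball loss max(q * e, (q - 1) * e) is [0.005, 0.095, 0.005],
    # whose mean is the expected 0.035.
    y_true = torch.tensor([1.0, 2.0, 3.0])
    y_pred = torch.tensor([1.1, 1.9, 3.1])
    return quantile_loss(y_pred, y_true, 0.95)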
| [
"torch.tensor"
] | 1.4 | JudyJin/torchTS | 2856e1bae8be3b9fdc23dcc2e8339674f1558ba5 |
0.6 | """
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""
from __future__ import print_function, division
from typing import Optional
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.loss import _Loss
from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
__all__ = ["LovaszLoss"]
def _lovasz_grad(gt_sorted):
"""Compute gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
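def _demo_lovasz_grad():
    # Illustrative sketch (not part of the original module): the marginal Jaccard
    # gains returned by _lovasz_grad for a toy ground-truth vector that is assumed
    # to be already sorted by decreasing prediction error.
    gt_sorted = torch.tensor([1, 0, 1, 1])
    return _lovasz_grad(gt_sorted)  # tensor([0.3333, 0.1667, 0.2500, 0.2500])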
def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -infinity and +infinity)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(
_lovasz_hinge_flat(*_flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels)
)
else:
loss = _lovasz_hinge_flat(*_flatten_binary_scores(logits, labels, ignore))
return loss
def _lovasz_hinge_flat(logits, labels):
"""Binary Lovasz hinge loss
Args:
logits: [P] Variable, logits at each prediction (between -infinity and +infinity)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = _lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def _flatten_binary_scores(scores, labels, ignore=None):
"""Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
# --------------------------- MULTICLASS LOSSES ---------------------------
def _lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
"""Multi-class Lovasz-Softmax loss
Args:
@param probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
@param labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
@param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
@param per_image: compute the loss per image instead of per batch
@param ignore: void class labels
"""
if per_image:
loss = mean(
_lovasz_softmax_flat(*_flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels)
)
else:
loss = _lovasz_softmax_flat(*_flatten_probas(probas, labels, ignore), classes=classes)
return loss
def _lovasz_softmax_flat(probas, labels, classes="present"):
"""Multi-class Lovasz-Softmax loss
Args:
@param probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
@param labels: [P] Tensor, ground truth labels (between 0 and C - 1)
@param classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).type_as(probas) # foreground for class c
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (fg - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, _lovasz_grad(fg_sorted)))
return mean(losses)
def _flatten_probas(probas, labels, ignore=None):
"""Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
C = probas.size(1)
    probas = torch.movedim(probas, 1, -1)  # [B, C, Di, Dj, ...] -> [B, Di, ..., Dk, C]
probas = probas.contiguous().view(-1, C) # [P, C]
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid]
vlabels = labels[valid]
return vprobas, vlabels
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(values, ignore_nan=False, empty=0):
"""Nanmean compatible with generators.
"""
values = iter(values)
if ignore_nan:
values = ifilterfalse(isnan, values)
try:
n = 1
acc = next(values)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(values, 2):
acc += v
if n == 1:
return acc
return acc / n
class LovaszLoss(_Loss):
def __init__(
self,
mode: str,
per_image: bool = False,
ignore_index: Optional[int] = None,
from_logits: bool = True,
):
"""Implementation of Lovasz loss for image segmentation task.
It supports binary, multiclass and multilabel cases
Args:
mode: Loss mode 'binary', 'multiclass' or 'multilabel'
ignore_index: Label that indicates ignored pixels (does not contribute to loss)
per_image: If True loss computed per each image and then averaged, else computed per whole batch
Shape
- **y_pred** - torch.Tensor of shape (N, C, H, W)
- **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)
Reference
https://github.com/BloodAxe/pytorch-toolbelt
"""
assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
super().__init__()
self.mode = mode
self.ignore_index = ignore_index
self.per_image = per_image
def forward(self, y_pred, y_true):
if self.mode in {BINARY_MODE, MULTILABEL_MODE}:
loss = _lovasz_hinge(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
elif self.mode == MULTICLASS_MODE:
y_pred = y_pred.softmax(dim=1)
loss = _lovasz_softmax(y_pred, y_true, per_image=self.per_image, ignore=self.ignore_index)
else:
raise ValueError("Wrong mode {}.".format(self.mode))
return loss | [
"torch.movedim",
"torch.nn.functional.relu",
"torch.sort",
"torch.autograd.Variable"
] | 0.6.3 | MikePham05/segmentation_models.pytorch | f61acfedf5e5b122430abb71181126bf1a288a94 |
0.4 | import pytest
import torch
import torch.nn
import numpy as np
import copy
from memcnn.models.affine import AffineAdapterNaive, AffineAdapterSigmoid
from memcnn import ReversibleBlock
def set_seeds(seed):
np.random.seed(seed)
torch.manual_seed(seed)
@pytest.mark.parametrize('coupling', ['additive', 'affine'])
def test_reversible_block_additive_notimplemented(coupling):
fm = torch.nn.Conv2d(10, 10, (3, 3), padding=1)
X = torch.zeros(1, 20, 10, 10)
with pytest.raises(NotImplementedError):
f = ReversibleBlock(fm, coupling=coupling, implementation_bwd=0, implementation_fwd=-2,
adapter=AffineAdapterNaive)
f.forward(X)
with pytest.raises(NotImplementedError):
f = ReversibleBlock(fm, coupling=coupling, implementation_bwd=-2, implementation_fwd=0,
adapter=AffineAdapterNaive)
f.inverse(X)
with pytest.raises(NotImplementedError):
ReversibleBlock(fm, coupling='unknown', implementation_bwd=-2, implementation_fwd=0,
adapter=AffineAdapterNaive)
@pytest.mark.parametrize('coupling,adapter', [('additive', None),
('affine', AffineAdapterNaive),
('affine', AffineAdapterSigmoid)])
def test_reversible_block_fwd_bwd(coupling, adapter):
"""ReversibleBlock test of the memory saving forward and backward passes
* test inversion Y = RB(X) and X = RB.inverse(Y)
* test training the block for a single step and compare weights for implementations: 0, 1
* test automatic discard of input X and its retrieval after the backward pass
* test usage of BN to identify non-contiguous memory blocks
"""
dims = (2, 10, 8, 8)
data = np.random.random(dims).astype(np.float32)
target_data = np.random.random(dims).astype(np.float32)
class SubModule(torch.nn.Module):
def __init__(self, in_filters, out_filters):
super(SubModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(out_filters)
self.conv = torch.nn.Conv2d(in_filters, out_filters, (3, 3), padding=1)
def forward(self, x):
return self.bn(self.conv(x))
Gm = SubModule(in_filters=5, out_filters=5 if coupling == 'additive' or adapter is AffineAdapterNaive else 10)
s_grad = [p.data.numpy().copy() for p in Gm.parameters()]
for seed in range(10):
set_seeds(seed)
for bwd in [False, True]:
impl_out, impl_grad = [], []
for keep_input_sub in [False, True]:
for keep_input_inverse_sub in [False, True]:
for implementation_fwd in [-1, 0, 1]:
for implementation_bwd in [-1, 0, 1]:
keep_input = keep_input_sub or implementation_fwd == -1
keep_input_inverse = keep_input_inverse_sub or implementation_bwd == -1
# print(bwd, coupling, keep_input, implementation_fwd, implementation_bwd)
# test with zero padded convolution
X = torch.from_numpy(data.copy())
Ytarget = torch.from_numpy(target_data.copy())
Xshape = X.shape
Gm2 = copy.deepcopy(Gm)
rb = ReversibleBlock(Gm2, coupling=coupling, implementation_fwd=implementation_fwd,
implementation_bwd=implementation_bwd, adapter=adapter,
keep_input=keep_input, keep_input_inverse=keep_input_inverse)
rb.train()
rb.zero_grad()
optim = torch.optim.RMSprop(rb.parameters())
optim.zero_grad()
if not bwd:
Xin = X.clone()
Y = rb(Xin)
Yrev = Y.clone()
Xinv = rb.inverse(Yrev)
else:
Xin = X.clone()
Y = rb.inverse(Xin)
Yrev = Y.clone()
Xinv = rb(Yrev)
loss = torch.nn.MSELoss()(Y, Ytarget)
# has input been retained/discarded after forward (and backward) passes?
def test_memory_cleared(var, isclear, shape):
if isclear:
assert var.storage().size() == 0
else:
assert var.storage().size() > 0
assert var.shape == shape
if not bwd:
test_memory_cleared(Xin, not keep_input, Xshape)
test_memory_cleared(Yrev, not keep_input_inverse, Xshape)
else:
test_memory_cleared(Yrev, not keep_input, Xshape)
test_memory_cleared(Xin, not keep_input_inverse, Xshape)
optim.zero_grad()
loss.backward()
optim.step()
assert Y.shape == Xshape
assert X.data.numpy().shape == data.shape
assert np.allclose(X.data.numpy(), data, atol=1e-06)
assert np.allclose(X.data.numpy(), Xinv.data.numpy(), atol=1e-05)
impl_out.append(Y.data.numpy().copy())
impl_grad.append([p.data.numpy().copy() for p in Gm2.parameters()])
assert not np.allclose(impl_grad[-1][0], s_grad[0])
# output and gradients similar over all implementations?
for i in range(0, len(impl_grad) - 1, 1):
assert np.allclose(impl_grad[i][0], impl_grad[i + 1][0])
assert np.allclose(impl_out[i], impl_out[i + 1])
@pytest.mark.parametrize('coupling,adapter', [('additive', None),
('affine', AffineAdapterNaive),
('affine', AffineAdapterSigmoid)])
def test_revblock_chained(coupling, adapter):
set_seeds(42)
dims = (2, 10, 8, 8)
data = np.random.random(dims).astype(np.float32)
target_data = np.random.random(dims).astype(np.float32)
X = torch.from_numpy(data.copy())
Ytarget = torch.from_numpy(target_data.copy())
class SubModule(torch.nn.Module):
def __init__(self, in_filters, out_filters):
super(SubModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(out_filters)
self.conv = torch.nn.Conv2d(in_filters, out_filters, (3, 3), padding=1)
def forward(self, x):
return self.bn(self.conv(x))
class SubModuleStack(torch.nn.Module):
def __init__(self, Gm, coupling='additive', depth=10, implementation_fwd=1, implementation_bwd=1,
keep_input=False, adapter=None):
super(SubModuleStack, self).__init__()
self.stack = torch.nn.Sequential(
*[ReversibleBlock(Gm, Gm, coupling=coupling, implementation_fwd=implementation_fwd,
implementation_bwd=implementation_bwd, adapter=adapter,
keep_input=keep_input) for _ in range(depth)]
)
def forward(self, x):
return self.stack(x)
Gm = SubModule(in_filters=5, out_filters=5 if coupling == 'additive' or adapter is AffineAdapterNaive else 10)
rb = SubModuleStack(Gm, coupling=coupling, depth=2, keep_input=False, adapter=adapter)
rb.train()
rb.zero_grad()
optim = torch.optim.RMSprop(rb.parameters())
optim.zero_grad()
Xin = X.clone()
Y = rb(Xin)
loss = torch.nn.MSELoss()(Y, Ytarget)
optim.zero_grad()
loss.backward()
optim.step()
@pytest.mark.parametrize('coupling', ['additive', 'affine'])
def test_revblock_simple_inverse(coupling):
"""ReversibleBlock inverse test
* test inversion Y = RB(X) and X = RB.inverse(Y)
"""
for seed in range(10):
set_seeds(seed)
for implementation_fwd in [-1, 0, 1]:
for implementation_bwd in [-1, 0, 1]:
# define some data
X = torch.rand(2, 4, 5, 5)
# define an arbitrary reversible function
fn = ReversibleBlock(torch.nn.Conv2d(2, 2, 3, padding=1), keep_input=False, coupling=coupling,
implementation_fwd=implementation_fwd, implementation_bwd=implementation_bwd,
adapter=AffineAdapterNaive)
# compute output
Y = fn.forward(X.clone())
# compute input from output
X2 = fn.inverse(Y)
# check that the inverted output and the original input are approximately similar
assert np.allclose(X2.data.numpy(), X.data.numpy(), atol=1e-06)
@pytest.mark.parametrize('coupling', ['additive', 'affine'])
@pytest.mark.parametrize('implementation_fwd', [-1, 0, 1])
@pytest.mark.parametrize('implementation_bwd', [-1, 0, 1])
def test_normal_vs_revblock(coupling, implementation_fwd, implementation_bwd):
"""ReversibleBlock test if similar gradients and weights results are obtained after similar training
* test training the block for a single step and compare weights and grads for implementations: 0, 1
* test against normal non Reversible Block function
* test if recreated input and produced output are contiguous
"""
for seed in range(10):
set_seeds(seed)
X = torch.rand(2, 4, 5, 5)
# define models and their copies
c1 = torch.nn.Conv2d(2, 2, 3, padding=1)
c2 = torch.nn.Conv2d(2, 2, 3, padding=1)
c1_2 = copy.deepcopy(c1)
c2_2 = copy.deepcopy(c2)
# are weights between models the same, but do they differ between convolutions?
assert torch.equal(c1.weight, c1_2.weight)
assert torch.equal(c2.weight, c2_2.weight)
assert torch.equal(c1.bias, c1_2.bias)
assert torch.equal(c2.bias, c2_2.bias)
assert not torch.equal(c1.weight, c2.weight)
# define optimizers
optim1 = torch.optim.SGD([e for e in c1.parameters()] + [e for e in c2.parameters()], 0.1)
optim2 = torch.optim.SGD([e for e in c1_2.parameters()] + [e for e in c2_2.parameters()], 0.1)
for e in [c1, c2, c1_2, c2_2]:
e.train()
# define an arbitrary reversible function and define graph for model 1
Xin = X.clone()
fn = ReversibleBlock(c1_2, c2_2, keep_input=False, coupling=coupling, adapter=AffineAdapterNaive,
implementation_fwd=implementation_fwd, implementation_bwd=implementation_bwd)
Y = fn.forward(Xin)
loss2 = torch.mean(Y)
# define the reversible function without custom backprop and define graph for model 2
XX = X.clone().data
XX.requires_grad = True
x1, x2 = torch.chunk(XX, 2, dim=1)
if coupling == 'additive':
y1 = x1 + c1.forward(x2)
y2 = x2 + c2.forward(y1)
elif coupling == 'affine':
fmr2 = c1.forward(x2)
fmr1 = torch.exp(fmr2)
y1 = (x1 * fmr1) + fmr2
gmr2 = c2.forward(y1)
gmr1 = torch.exp(gmr2)
y2 = (x2 * gmr1) + gmr2
else:
raise NotImplementedError()
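# Both couplings are invertible in closed form, which is what lets the reversible
# block recompute its input instead of storing it (with the naive affine adapter
# used here):
#   additive: x2 = y2 - c2(y1),                   x1 = y1 - c1(x2)
#   affine:   x2 = (y2 - c2(y1)) * exp(-c2(y1)),  x1 = (y1 - c1(x2)) * exp(-c1(x2))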
YY = torch.cat([y1, y2], dim=1)
loss = torch.mean(YY)
# compute gradients manually
grads = torch.autograd.grad(loss, (XX, c1.weight, c2.weight, c1.bias, c2.bias), None, retain_graph=True)
# compute gradients and perform optimization model 2
loss.backward()
optim1.step()
# gradients computed manually match those of the .backward() pass
assert torch.equal(c1.weight.grad, grads[1])
assert torch.equal(c2.weight.grad, grads[2])
assert torch.equal(c1.bias.grad, grads[3])
assert torch.equal(c2.bias.grad, grads[4])
# weights differ after training a single model?
assert not torch.equal(c1.weight, c1_2.weight)
assert not torch.equal(c2.weight, c2_2.weight)
assert not torch.equal(c1.bias, c1_2.bias)
assert not torch.equal(c2.bias, c2_2.bias)
# compute gradients and perform optimization model 1
loss2.backward()
optim2.step()
# input is contiguous tests
assert Xin.is_contiguous()
assert Y.is_contiguous()
# weights are approximately the same after training both models?
assert np.allclose(c1.weight.data.numpy(), c1_2.weight.data.numpy(), atol=1e-06)
assert np.allclose(c2.weight.data.numpy(), c2_2.weight.data.numpy())
assert np.allclose(c1.bias.data.numpy(), c1_2.bias.data.numpy())
assert np.allclose(c2.bias.data.numpy(), c2_2.bias.data.numpy())
# gradients are approximately the same after training both models?
assert np.allclose(c1.weight.grad.data.numpy(), c1_2.weight.grad.data.numpy(), atol=1e-06)
assert np.allclose(c2.weight.grad.data.numpy(), c2_2.weight.grad.data.numpy())
assert np.allclose(c1.bias.grad.data.numpy(), c1_2.bias.grad.data.numpy())
assert np.allclose(c2.bias.grad.data.numpy(), c2_2.bias.grad.data.numpy())
| [
"torch.zeros",
"torch.rand",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.BatchNorm2d",
"torch.manual_seed",
"torch.autograd.grad",
"torch.nn.Conv2d",
"torch.equal",
"torch.exp",
"torch.mean",
"torch.chunk"
] | 0.4.0 | hzy5660251/memcnn | 1293468e4ee4ed83fcf9da36940065bbe72dd54b |
0.3 | # python -m unittest -v test/torch_test.py
import unittest
from unittest import TestCase
import random
import syft as sy
import numpy as np
from syft.core.frameworks.torch import utils as torch_utils
from syft.core.frameworks import encode
from syft.core.frameworks.torch.tensor import _GeneralizedPointerTensor
import torch
import torch.nn.functional as F
from torch.autograd import Variable as Var
import msgpack
bob = None
alice = None
james = None
me = None
hook = None
def setUpModule():
print("setup module")
global me
global bob
global alice
global james
global hook
hook = sy.TorchHook(verbose=True)
me = hook.local_worker
me.is_client_worker = False
bob = sy.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
alice = sy.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
james = sy.VirtualWorker(id="james", hook=hook, is_client_worker=False)
bob.add_workers([alice, james])
alice.add_workers([bob, james])
james.add_workers([bob, alice])
class Chain:
def __init__(self, leaf=False):
if not leaf:
self.tensor = Chain(True)
self.var = Chain(True)
global display_chain
display_chain = Chain()
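# display_chain records the chain_print() strings expected for each wrapped-tensor
# configuration (local, pointer, fixed-precision, shared); assertions throughout
# the tests below compare against these reference strings.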
display_chain.tensor.local = "FloatTensor > _LocalTensor"
display_chain.tensor.pointer = "FloatTensor > _PointerTensor"
display_chain.tensor.fixp_local = (
"FloatTensor > _FixedPrecisionTensor > LongTensor > _LocalTensor"
)
display_chain.tensor.fixp_mpc_gpt = (
"FloatTensor > _FixedPrecisionTensor"
"> LongTensor > _SNNTensor > LongTensor > _GeneralizedPointerTensor"
)
display_chain.var.local = (
"Variable > _LocalTensor\n"
" - FloatTensor > _LocalTensor\n"
" - - Variable > _LocalTensor\n"
" - FloatTensor > _LocalTensor"
)
display_chain.var.pointer = (
"Variable > _PointerTensor\n"
" - FloatTensor > _PointerTensor\n"
" - - Variable > _PointerTensor\n"
" - FloatTensor > _PointerTensor"
)
display_chain.var.fixp_local = (
"Variable > _FixedPrecisionTensor > Variable > _LocalTensor\n"
" - FloatTensor > _FixedPrecisionTensor > LongTensor > _LocalTensor\n"
" - - Variable > _FixedPrecisionTensor > Variable > _LocalTensor\n"
" - FloatTensor > _FixedPrecisionTensor > LongTensor > _LocalTensor"
)
display_chain.var.fixp_mpc_gpt = (
"Variable > _FixedPrecisionTensor > Variable > _SNNTensor "
"> Variable > _GeneralizedPointerTensor\n"
" - FloatTensor > _FixedPrecisionTensor > LongTensor"
"> _SNNTensor > LongTensor > _GeneralizedPointerTensor\n"
" - - Variable > _FixedPrecisionTensor > Variable > _SNNTensor"
"> Variable > _GeneralizedPointerTensor\n"
" - FloatTensor > _FixedPrecisionTensor > LongTensor"
"> _SNNTensor > LongTensor > _GeneralizedPointerTensor"
)
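# _PlusIsMinusTensor is a test tensor type whose add() is overloaded to subtract,
# which is why [5, 6] + [3, 4] is expected to yield [2, 2] below and why the
# gradient of (x + y).sum() with respect to x is all ones.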
class TestChainTensor(TestCase):
def test_plus_is_minus_tensor_local(self):
x = torch.FloatTensor([5, 6])
y = torch.FloatTensor([3, 4])
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
assert (
torch_utils.chain_print(x, display=False)
== "FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
z = x.add(y)
assert (
torch_utils.chain_print(z, display=False)
== "FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
# cut chain for the equality check
z.child = z.child.child
assert torch.equal(z, torch.FloatTensor([2, 2]))
z = torch.add(x, y)
# cut chain for the equality check
z.child = z.child.child
assert torch.equal(z, torch.FloatTensor([2, 2]))
def test_plus_is_minus_tensor_remote(self):
x = torch.FloatTensor([5, 6])
y = torch.FloatTensor([3, 4])
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
id1 = random.randint(0, 10e10)
id2 = random.randint(0, 10e10)
x.send(bob, ptr_id=id1)
y.send(bob, ptr_id=id2)
z = x.add(y)
assert (
torch_utils.chain_print(z, display=False) == "FloatTensor > _PointerTensor"
)
# Check chain on remote
ptr_id = z.child.id_at_location
assert (
torch_utils.chain_print(bob._objects[ptr_id].parent, display=False)
== "FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
z.get()
assert (
torch_utils.chain_print(z, display=False)
== "FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
# cut chain for the equality check
z.child = z.child.child
assert torch.equal(z, torch.FloatTensor([2, 2]))
def test_plus_is_minus_variable_local(self):
x = sy.Variable(torch.FloatTensor([5, 6]))
y = sy.Variable(torch.FloatTensor([3, 4]))
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
display = (
"Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n"
" - - Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
assert torch_utils.chain_print(x, display=False) == display
z = x.add(y)
assert (
torch_utils.chain_print(z, display=False)
== "Variable > _PlusIsMinusTensor > "
"_LocalTensor\n - FloatTensor >"
" _PlusIsMinusTensor > _LocalTensor"
)
# cut chain for the equality check
z.data.child = z.data.child.child
assert torch.equal(z.data, torch.FloatTensor([2, 2]))
z = torch.add(x, y)
# cut chain for the equality check
z.data.child = z.data.child.child
assert torch.equal(z.data, torch.FloatTensor([2, 2]))
def test_plus_is_minus_variable_remote(self):
x = sy.Variable(torch.FloatTensor([5, 6]))
y = sy.Variable(torch.FloatTensor([3, 4]))
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
id1 = random.randint(0, 10e10)
id2 = random.randint(0, 10e10)
id11 = random.randint(0, 10e10)
id21 = random.randint(0, 10e10)
x.send(bob, new_id=id1, new_data_id=id11)
y.send(bob, new_id=id2, new_data_id=id21)
z = x.add(y)
assert (
torch_utils.chain_print(z, display=False) == "Variable > _PointerTensor\n"
" - FloatTensor > _PointerTensor\n"
" - - Variable > _PointerTensor\n"
" - FloatTensor > _PointerTensor"
)
assert bob._objects[z.id_at_location].owner.id == "bob"
assert bob._objects[z.data.id_at_location].owner.id == "bob"
# Check chain on remote
ptr_id = x.child.id_at_location
display = (
"Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n"
" - - Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor"
)
assert (
torch_utils.chain_print(bob._objects[ptr_id].parent, display=False)
== display
)
# Check chain on remote
# TODO For now we don't reconstruct the grad chain on non-leaf variables (in our case a leaf
# variable is a variable that we sent), because we don't care about their gradients.
# But if we do, then this is a TODO!
ptr_id = z.child.id_at_location
display = (
"Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n"
" - - Variable > _LocalTensor\n"
" - FloatTensor > _LocalTensor"
)
assert (
torch_utils.chain_print(bob._objects[ptr_id].parent, display=False)
== display
)
z.get()
display = (
"Variable > _PlusIsMinusTensor > _LocalTensor\n"
" - FloatTensor > _PlusIsMinusTensor > _LocalTensor\n"
" - - Variable > _LocalTensor\n"
" - FloatTensor > _LocalTensor"
)
assert torch_utils.chain_print(z, display=False) == display
# cut chain for the equality check
z.data.child = z.data.child.child
assert torch.equal(z.data, torch.FloatTensor([2, 2]))
def test_plus_is_minus_backward_local(self):
x = sy.Variable(torch.FloatTensor([5, 6]), requires_grad=True)
y = sy.Variable(torch.FloatTensor([3, 4]), requires_grad=True)
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
z = x.add(y).sum()
z.backward()
# cut chain for the equality check
x.grad.data.child = x.grad.data.child.child
assert torch.equal(x.grad.data, torch.FloatTensor([1, 1]))
def test_plus_is_minus_backward_remote(self):
x = sy.Variable(torch.FloatTensor([5, 6]), requires_grad=True)
y = sy.Variable(torch.FloatTensor([3, 4]), requires_grad=True)
x = sy._PlusIsMinusTensor().on(x)
y = sy._PlusIsMinusTensor().on(y)
x.send(bob)
y.send(bob)
z = x.add(y).sum()
z.backward()
# cut chain for the equality check
x.get()
x.child = x.child.child
# TODO: figure out why some machines prefer one of these options
# while others prefer the other
try:
target = sy._PlusIsMinusTensor().on(torch.FloatTensor([1, 1]))
target.child = target.child.child
assert torch.equal(x.grad.data, target)
except AttributeError:
target = sy._PlusIsMinusTensor().on(torch.FloatTensor([1, 1]))
target.child = target.child
assert torch.equal(x.grad.data, target)
class TestTorchTensor(TestCase):
def test_set_id(self):
hook.local_worker.is_client_worker = False
x = torch.FloatTensor([-2, -1, 0, 1, 2, 3]).set_id("bobs tensor")
assert x.id == "bobs tensor"
assert x.child.id == "bobs tensor"
assert x.id in hook.local_worker._objects
assert list(x.child.old_ids)[0] in hook.local_worker._objects
assert list(x.child.old_ids)[0] != x.id
x = sy.Var(sy.FloatTensor([-2, -1, 0, 1, 2, 3])).set_id("bobs variable")
assert x.id == "bobs variable"
assert x.child.id == "bobs variable"
assert x.id in hook.local_worker._objects
assert list(x.child.old_ids)[0] in hook.local_worker._objects
assert list(x.child.old_ids)[0] != x.id
def test___repr__(self):
x = torch.FloatTensor([1, 2, 3, 4, 5])
# assert x.__repr__() == '\n 1\n 2\n 3\n 4\n 5\n[torch.FloatTensor of size 5]\n'
assert (
x.__repr__() == "\n 1\n 2\n 3\n 4\n 5\n["
"syft.core.frameworks.torch.tensor.FloatTensor of size 5]\n"
)
def test_send_get_tensor(self):
x = torch.FloatTensor([1, 2, 3, 4, 5])
x_id = x.id
ptr_id = random.randint(0, 10e10)
x.send(bob, ptr_id=ptr_id)
assert x_id in me._objects
ptr = me._objects[x_id]
assert x.child == ptr
assert isinstance(ptr, sy._PointerTensor)
assert ptr.id_at_location == ptr_id
assert ptr.location.id == bob.id
assert ptr_id in bob._objects
remote_x = bob._objects[ptr_id]
assert isinstance(remote_x, sy._LocalTensor)
assert torch.equal(remote_x.child, torch.FloatTensor([1, 2, 3, 4, 5]))
x.get()
# Check that it's still registered
assert x.id in me._objects
assert torch.equal(me._objects[x.id].child, x)
assert (x == torch.FloatTensor([1, 2, 3, 4, 5])).all()
# because .get() was called, x should no longer be in the remote worker's objects dict
assert ptr_id not in bob._objects
def test_multiple_pointers_to_same_target(self):
# There are two cases:
# - You're sending a var on a loc:id you're already pointing at -> should abort
# - You're pointing at the result of an in-place remote operation like:
# x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5])).send(bob)
# y = x.abs_() # in-place operation
# y.get()
# x.send(bob) # if x.child != y.child, x will send its old pointer
# to bob -> triggering an error
# You want this to work, but don't want to create a new pointer, just
# reuse the old one.
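# Case 1 below is expected to raise (surfacing here as a MemoryError), while
# case 2 should silently reuse the existing pointer rather than register a new one.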
# 1.
ptr_id = random.randint(0, 10e10)
y = torch.FloatTensor([1, 2])
y.send(bob, ptr_id=ptr_id)
x = torch.FloatTensor([1, 2, 3, 4, 5])
try:
x.send(bob, ptr_id=ptr_id)
assert False
except MemoryError:
assert True
# 2.
x = torch.FloatTensor([1, 2, -3, 4, 5]).send(bob)
x_id = x.id
y = x.abs_() # in-place operation
assert y.child == x.child
assert x.id == x_id
assert y.id == x.id
y.get()
x.send(bob)
def test_chain_send_get_tensor(self):
x = torch.FloatTensor([1, 2, 3, 4, 5])
id1 = random.randint(0, 10e10)
id2 = random.randint(0, 10e10)
id3 = random.randint(0, 10e10)
x.send(bob, ptr_id=id1)
assert id1 in bob._objects
x.send(alice, ptr_id=id2)
assert id2 in alice._objects
x.send(james, ptr_id=id3)
assert id3 in james._objects
x.get()
x.get()
x.get()
# test the get is ok
assert torch.equal(x, torch.FloatTensor([1, 2, 3, 4, 5]))
# Test that the remotes are empty
assert id1 not in bob._objects
assert id2 not in alice._objects
assert id3 not in james._objects
def test_add_remote_tensor(self):
x = sy.FloatTensor([1, 2, 3, 4])
x.send(bob, ptr_id=1000)
x.send(alice, ptr_id=2000)
y = sy.FloatTensor([2, 3, 4, 5])
y.send(bob, ptr_id=1001)
y.send(alice, ptr_id=2001)
z = torch.add(x, y)
z.get().get()
assert torch.equal(z, torch.FloatTensor([3, 5, 7, 9]))
# def test_fixed_prec_ops(self):
# hook = TorchHook(verbose=False)
# x = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(7)
# y = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(3)
# assert ((x + y).free_precision() == torch.FloatTensor([2, 4, 6, 8, 10])).all()
# assert ((x / y).free_precision() == torch.FloatTensor([1, 1, 1, 1, 1])).all()
# assert ((x * y).free_precision() == torch.FloatTensor([1, 4, 9, 16, 25])).all()
# assert ((x - y).free_precision() == torch.FloatTensor([0, 0, 0, 0, 0])).all()
# x = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(3)
# y = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(7)
# assert ((x + y).free_precision() == torch.FloatTensor([2, 4, 6, 8, 10])).all()
# assert ((x / y).free_precision() == torch.FloatTensor([1, 1, 1, 1, 1])).all()
# assert ((x * y).free_precision() == torch.FloatTensor([1, 4, 9, 16, 25])).all()
# assert ((x - y).free_precision() == torch.FloatTensor([0, 0, 0, 0, 0])).all()
# x = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(3)
# y = torch.FloatTensor([1, 2, 3, 4, 5]).set_precision(3)
# assert ((x + y).free_precision() == torch.FloatTensor([2, 4, 6, 8, 10])).all()
# assert ((x / y).free_precision() == torch.FloatTensor([1, 1, 1, 1, 1])).all()
# assert ((x * y).free_precision() == torch.FloatTensor([1, 4, 9, 16, 25])).all()
# assert ((x - y).free_precision() == torch.FloatTensor([0, 0, 0, 0, 0])).all()
def test_local_tensor_unary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = torch.FloatTensor([1, 2, -3, 4, 5])
assert (x.abs() == torch.FloatTensor([1, 2, 3, 4, 5])).all()
assert (x.abs_() == torch.FloatTensor([1, 2, 3, 4, 5])).all()
x = x.cos()
assert (x.int() == torch.IntTensor([0, 0, 0, 0, 0])).all()
x = x.cos_()
assert (x.int() == torch.IntTensor([0, 0, 0, 0, 0])).all()
x = torch.FloatTensor([1, 2, -3, 4, 5])
assert (x.ceil() == x).all()
assert (x.ceil_() == x).all()
assert (x.cpu() == x).all()
def test_local_tensor_binary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = torch.FloatTensor([1, 2, 3, 4])
y = torch.FloatTensor([[1, 2, 3, 4]])
z = torch.matmul(x, y.t())
assert torch.equal(z, torch.FloatTensor([30]))
z = torch.add(x, y)
assert torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]]))
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
z = torch.cross(x, y, dim=1)
assert torch.equal(z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
z = torch.dist(x, y)
assert torch.equal(torch.FloatTensor([z]), torch.FloatTensor([0]))
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([1, 2, 3])
z = torch.dot(x, y)
# There is an issue with some Macs getting 0.0 instead
# Solved here: https://github.com/pytorch/pytorch/issues/5609
assert torch.equal(torch.FloatTensor([z]), torch.FloatTensor([14])), (
"There is an issue with some Macs getting 0.0 instead, "
"see https://github.com/pytorch/pytorch/issues/5609"
)
z = torch.eq(x, y)
assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
z = torch.ge(x, y)
assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
x = torch.FloatTensor([1, 2, 3, 4, 5])
y = torch.FloatTensor([1, 2, 3, 4, 5])
assert (x.add_(y) == torch.FloatTensor([2, 4, 6, 8, 10])).all()
def test_remote_tensor_unary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = torch.FloatTensor([1, 2, -3, 4, 5]).send(bob)
assert (x.abs().get() == torch.FloatTensor([1, 2, 3, 4, 5])).all()
x = torch.FloatTensor([1, 2, -3, 4, 5]).send(bob)
assert (x.cos().int().get() == torch.IntTensor([0, 0, 0, 0, 0])).all()
y = x.cos_()
assert (y.cos_().int().get() == torch.IntTensor([0, 0, 0, 0, 0])).all()
x = torch.FloatTensor([1, 2, -3, 4, 5]).send(bob)
assert (x.ceil().get() == torch.FloatTensor([1, 2, -3, 4, 5])).all()
assert (x.cpu().get() == torch.FloatTensor([1, 2, -3, 4, 5])).all()
def test_remote_tensor_binary_methods(self):
x = torch.FloatTensor([1, 2, 3, 4, 5]).send(bob)
y = torch.FloatTensor([1, 2, 3, 4, 5]).send(bob)
assert (torch.add(x, y).get() == torch.FloatTensor([2, 4, 6, 8, 10])).all()
x = torch.FloatTensor([1, 2, 3, 4]).send(bob)
y = torch.FloatTensor([[1], [2], [3], [4]]).send(bob)
z = torch.matmul(x, y)
assert torch.equal(z.get(), torch.FloatTensor([30]))
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(bob)
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(bob)
z = torch.cross(x, y, dim=1)
assert torch.equal(
z.get(), torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
)
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(bob)
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(bob)
z = torch.dist(x, y)
z.get()
assert torch.equal(z, torch.FloatTensor([0.0]))
x = torch.FloatTensor([1, 2, 3]).send(bob).send(alice)
y = torch.FloatTensor([1, 2, 3]).send(bob).send(alice)
z = torch.dot(x, y)
z.get().get()
assert torch.equal(z, torch.FloatTensor([14]))
z = torch.eq(x, y)
assert torch.equal(z.get().get(), torch.ByteTensor([1, 1, 1]))
z = torch.ge(x, y)
assert torch.equal(z.get().get(), torch.ByteTensor([1, 1, 1]))
def test_local_tensor_tertiary_methods(self):
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([1, 2, 3])
z = torch.FloatTensor([1, 2, 3])
assert torch.equal(
torch.addcmul(z, 2, x, y), torch.FloatTensor([3.0, 10.0, 21.0])
)
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([1, 2, 3])
z = torch.FloatTensor([1, 2, 3])
z.addcmul_(2, x, y)
assert torch.equal(z, torch.FloatTensor([3.0, 10.0, 21.0]))
x = torch.FloatTensor([[1, 2]])
y = torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
z = torch.FloatTensor([1, 2, 3])
assert torch.equal(
torch.addmm(z, x, y), torch.FloatTensor([[10.0, 14.0, 18.0]])
)
def test_remote_tensor_tertiary_methods(self):
x = torch.FloatTensor([1, 2, 3]).send(bob)
y = torch.FloatTensor([1, 2, 3]).send(bob)
z = torch.FloatTensor([1, 2, 3]).send(bob)
assert torch.equal(
torch.addcmul(z, 2, x, y).get(), torch.FloatTensor([3.0, 10.0, 21.0])
)
# Same computation, but using the in-place addcmul_ method instead of the torch function
x = torch.FloatTensor([1, 2, 3]).send(bob)
y = torch.FloatTensor([1, 2, 3]).send(bob)
z = torch.FloatTensor([1, 2, 3]).send(bob)
z.addcmul_(2, x, y)
assert torch.equal(z.get(), torch.FloatTensor([3.0, 10.0, 21.0]))
x = torch.FloatTensor([[1, 2]]).send(bob)
y = torch.FloatTensor([[1, 2, 3], [4, 5, 6]]).send(bob)
z = torch.FloatTensor([1, 2, 3]).send(bob)
assert torch.equal(
torch.addmm(z, x, y).get(), torch.FloatTensor([[10.0, 14.0, 18.0]])
)
def test_local_tensor_iterable_methods(self):
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([2, 3, 4])
z = torch.FloatTensor([5, 6, 7])
assert torch.equal(
torch.stack([x, y, z]), torch.FloatTensor([[1, 2, 3], [2, 3, 4], [5, 6, 7]])
)
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([2, 3, 4])
z = torch.FloatTensor([5, 6, 7])
assert torch.equal(
torch.cat([x, y, z]), torch.FloatTensor([1, 2, 3, 2, 3, 4, 5, 6, 7])
)
def test_remote_tensor_iterable_methods(self):
x = torch.FloatTensor([1, 2, 3]).send(bob)
y = torch.FloatTensor([2, 3, 4]).send(bob)
z = torch.FloatTensor([5, 6, 7]).send(bob)
x.get()
y.get()
z.get()
assert torch.equal(
torch.stack([x, y, z]), torch.FloatTensor([[1, 2, 3], [2, 3, 4], [5, 6, 7]])
)
x = torch.FloatTensor([1, 2, 3]).send(bob)
y = torch.FloatTensor([2, 3, 4]).send(bob)
z = torch.FloatTensor([5, 6, 7]).send(bob)
x.get()
y.get()
z.get()
assert torch.equal(
torch.cat([x, y, z]), torch.FloatTensor([1, 2, 3, 2, 3, 4, 5, 6, 7])
)
def test_remote_tensor_unwrapped_addition(self):
x = torch.LongTensor([1, 2, 3, 4, 5]).send(bob)
y = x.child + x.child
assert (y.get() == x.get() * 2).all()
def test_end_get_tensor(self):
bob_id = random.randint(0, 10e10)
alice_id = random.randint(0, 10e10)
x = (
sy.FloatTensor([1, 2, 3, 4, 5])
.send(bob, ptr_id=bob_id)
.send(alice, ptr_id=alice_id)
)
x2 = x.end_get()
# Now alice will own the tensor that was in bob and bob won't have it anymore
assert bob_id not in bob._objects
assert alice_id in alice._objects
assert isinstance(alice._objects[alice_id], sy._LocalTensor)
assert torch.equal(x2.get(), torch.FloatTensor([1, 2, 3, 4, 5]))
class TestTorchVariable(TestCase):
def test_remote_backprop(self):
x = sy.Variable(torch.ones(2, 2), requires_grad=True).send(bob)
x2 = sy.Variable(torch.ones(2, 2) * 2, requires_grad=True).send(bob)
y = x * x2
y.sum().backward()
# remote grads should be correct
assert (
bob._objects[x2.child.id_at_location].child.grad.data == torch.ones(2, 2)
).all()
# You can call .grad on a syft tensor, which makes .child and .grad commutative
assert (
bob._objects[x2.child.id_at_location].grad.child.data == torch.ones(2, 2)
).all()
assert (
bob._objects[x.child.id_at_location].child.grad.data == torch.ones(2, 2) * 2
).all()
assert (y.get().data == torch.ones(2, 2) * 2).all()
assert (x.get().data == torch.ones(2, 2)).all()
assert (x2.get().data == torch.ones(2, 2) * 2).all()
assert (x.grad.data == torch.ones(2, 2) * 2).all()
assert (x2.grad.data == torch.ones(2, 2)).all()
def test_variable_data_attribute_bug(self):
# previously, newly created Variable objects would lose their OpenMined-given
# attributes on the .data python objects they contain whenever the Variable
# object is returned from a function. This bug was fixed by storing a backup
# pointer to the .data object (.data_backup) so that the python object doesn't
# get garbage collected. This test used to error out at the last line (as
# indicated below)
def relu(x):
"""Rectified linear activation."""
return torch.clamp(x, min=0.0)
def linear(x, w):
"""Linear transformation of x by w."""
return x.mm(w)
x = Var(torch.FloatTensor([[1, 1], [2, 2]]), requires_grad=True)
y = Var(torch.FloatTensor([[1, 1], [2, 2]]), requires_grad=True)
z = linear(x, y)
# previously we had to do the following to prevent this bug
# leaving it here for reference in case the bug returns later.
# print(z.data.is_pointer)
# before the bugfix, the following line would error out.
z = relu(z)
assert True
def test_encode_decode_json_python(self):
"""Test that the python objects are correctly encoded and decoded in
json with our encoder/JSONDecoder.
The main focus is on non-serializable objects, such as torch
Variable or tuple, or even slice().
"""
x = Var(torch.FloatTensor([[1, -1], [0, 1]]))
x.send(bob)
obj = [None, ({"marcel": (1, [1.3], x), "proust": slice(0, 2, None)}, 3)]
enc, t = encode.encode(obj)
enc = msgpack.packb(enc, use_bin_type=True)
dec1 = encode.decode(enc, me)
enc, t = encode.encode(dec1)
enc = msgpack.packb(enc, use_bin_type=True)
dec2 = encode.decode(enc, me)
assert dec1 == dec2
def test_var_gradient_keeps_id_during_send_(self):
# PyTorch has a tendency to delete var.grad python objects
# and re-initialize them (resulting in new/random ids)
# we have fixed this bug and recorded how it was fixed
# as well as the creation of this unit test in the following
# video (1:50:00 - 2:00:00) ish
# https://www.twitch.tv/videos/275838386
data = Var(torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]))
target = Var(torch.FloatTensor([[0], [0], [1], [1]]))
model = Var(torch.zeros(2, 1), requires_grad=True)
# generates grad objects on model
pred = data.mm(model)
loss = ((pred - target) ** 2).sum()
loss.backward()
# the grad's true id
original_data_id = model.data.id + 0
original_grad_id = model.grad.data.id + 0
model.send(bob)
assert model.data.id == original_data_id
assert model.grad.data.id == original_grad_id
def test_operation_with_variable_and_parameter(self):
x = sy.Parameter(sy.FloatTensor([1]))
y = sy.Variable(sy.FloatTensor([1]))
z = x * y
assert torch.equal(z, sy.Variable(sy.FloatTensor([1])))
def test_send_var_with_gradient(self):
# For now, we assume that var.grad.data does not get allocated
# a pointer because it would not get used.
data = Var(torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]))
target = Var(torch.FloatTensor([[0], [0], [1], [1]]))
model = Var(torch.zeros(2, 1), requires_grad=True)
# generates grad objects on model
pred = data.mm(model)
loss = ((pred - target) ** 2).sum()
loss.backward()
# ensure that model and all (grand)children are owned by the local worker
assert model.owner.id == me.id
assert model.data.owner.id == me.id
# if you get a failure here saying that model.grad.owners does not exist
# check in hooks.py - _hook_new_grad(). self.grad_backup has probably either
# been deleted or is being run at the wrong time (see comments there)
assert model.grad.owner.id == me.id
assert model.grad.data.owner.id == me.id
# ensure that objects are not yet pointers (haven't sent it yet)
assert not isinstance(model.child, sy._PointerTensor)
assert not isinstance(model.data.child, sy._PointerTensor)
assert not isinstance(model.grad.child, sy._PointerTensor)
assert not isinstance(model.grad.data.child, sy._PointerTensor)
model.send(bob)
assert model.location.id == bob.id
assert model.data.location.id == bob.id
assert model.grad.location.id == bob.id
assert model.grad.data.location.id == bob.id
# ensure that the objects are now pointers (the model has been sent)
assert isinstance(model.child, sy._PointerTensor)
assert isinstance(model.data.child, sy._PointerTensor)
assert isinstance(model.grad.child, sy._PointerTensor)
assert isinstance(model.grad.data.child, sy._PointerTensor)
assert model.id_at_location in bob._objects
assert model.data.id_at_location in bob._objects
assert model.grad.id_at_location in bob._objects
assert model.grad.data.id_at_location in bob._objects
def test_remote_optim_step(self):
torch.manual_seed(42)
param = []
data = Var(torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])).send(bob)
target = Var(torch.FloatTensor([[0], [0], [1], [1]])).send(bob)
model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(params=model.parameters(), lr=0.1)
for i in model.parameters():
param.append(i[:])
model.send(bob)
model.zero_grad()
pred = model(data)
loss = ((pred - target) ** 2).sum()
loss.backward()
opt.step()
model.get()
for i in model.parameters():
param.append(i[:])
x = []
for i in param:
if type(i.data[0]) != float:
x.append(i.data[0][0])
x.append(i.data[0][1])
else:
x.append(i.data[0])
y = [
0.5406,
0.5869,
-0.165_655_672_550_201_42,
0.6732,
0.5103,
-0.084_136_970_341_205_6,
]
for X, Y in zip(x, y):
    self.assertAlmostEqual(X, Y, places=3)
def test_federated_learning(self):
torch.manual_seed(42)
# hook = TorchHook(verbose=False)
# me = hook.local_worker
# me.verbose = False
#
# bob = VirtualWorker(id=1, hook=hook, verbose=False)
# alice = VirtualWorker(id=2, hook=hook, verbose=False)
# me.add_worker(bob)
# me.add_worker(alice)
# create our dataset
data = Var(torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]))
target = Var(torch.FloatTensor([[0], [0], [1], [1]]))
data_bob = (data[0:2] + 0).send(bob)
target_bob = (target[0:2] + 0).send(bob)
data_alice = data[2:].send(alice)
target_alice = target[2:].send(alice)
# create our model
model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(params=model.parameters(), lr=0.1)
datasets = [(data_bob, target_bob), (data_alice, target_alice)]
for iter in range(2):
for data, target in datasets:
model.send(data.location)
# update the model
model.zero_grad()
pred = model(data)
loss = ((pred - target) ** 2).sum()
loss.backward()
opt.step()
model.get()
if iter == 1:
final_loss = loss.get().data[0]
assert (final_loss - 0.180_852_845_311_164_86) < 0.001
def test_torch_function_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([[1, 2], [3, 4]]))
y = sy.Variable(torch.FloatTensor([[1, 2], [1, 2]]))
x.send(bob)
y.send(bob)
z = torch.matmul(x, y)
z.get()
assert torch.equal(z, sy.Variable(torch.FloatTensor([[3, 6], [7, 14]])))
def test_torch_function_with_multiple_input_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([1, 2]))
y = sy.Variable(torch.FloatTensor([3, 4]))
x.send(bob)
y.send(bob)
z = torch.stack([x, y])
z.get()
assert torch.equal(z, sy.Variable(torch.FloatTensor([[1, 2], [3, 4]])))
def test_torch_function_with_multiple_output_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([[1, 2], [4, 3], [5, 6]]))
x.send(bob)
y, z = torch.max(x, 1)
y.get()
assert torch.equal(y, sy.Variable(torch.FloatTensor([2, 4, 6])))
def test_torch_F_relu_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([[1, -1], [-1, 1]]))
x.send(bob)
x = F.relu(x)
x.get()
assert torch.equal(x, sy.Variable(torch.FloatTensor([[1, 0], [0, 1]])))
def test_torch_F_conv2d_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([[[[1, -1, 2], [-1, 0, 1], [1, 0, -2]]]]))
x.send(bob)
weight = torch.nn.Parameter(torch.FloatTensor([[[[1, -1], [-1, 1]]]]))
bias = torch.nn.Parameter(torch.FloatTensor([0]))
weight.send(bob)
bias.send(bob)
conv = F.conv2d(x, weight, bias, stride=(1, 1))
conv.get()
expected_conv = sy.Variable(torch.FloatTensor([[[[3, -2], [-2, -3]]]]))
assert torch.equal(conv, expected_conv)
def test_torch_nn_conv2d_on_remote_var(self):
x = sy.Variable(torch.FloatTensor([[[[1, -1, 2], [-1, 0, 1], [1, 0, -2]]]]))
x.send(bob)
convolute = torch.nn.Conv2d(1, 1, 2, stride=1, padding=0)
convolute.weight = torch.nn.Parameter(torch.FloatTensor([[[[1, -1], [-1, 1]]]]))
convolute.bias = torch.nn.Parameter(torch.FloatTensor([0]))
convolute.send(bob)
conv = convolute(x)
conv.get()
expected_conv = sy.Variable(torch.FloatTensor([[[[3, -2], [-2, -3]]]]))
assert torch.equal(conv, expected_conv)
def test_local_var_unary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
assert torch.equal(x.abs(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])))
assert torch.equal(x.abs_(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])))
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
assert torch.equal(x.cos().int(), sy.Variable(torch.IntTensor([0, 0, 0, 0, 0])))
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
assert torch.equal(
x.cos_().int(), sy.Variable(torch.IntTensor([0, 0, 0, 0, 0]))
)
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
assert torch.equal(x.ceil(), x)
assert torch.equal(x.ceil_(), x)
assert torch.equal(x.cpu(), x)
def test_local_var_binary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = torch.FloatTensor([1, 2, 3, 4])
y = torch.FloatTensor([[1, 2, 3, 4]])
z = torch.matmul(x, y.t())
assert torch.equal(z, torch.FloatTensor([30]))
z = torch.add(x, y)
assert torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]]))
x = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
y = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
assert torch.equal(x.add_(y), sy.Variable(torch.FloatTensor([2, 4, 6, 8, 10])))
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
z = torch.cross(x, y, dim=1)
assert torch.equal(z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))
x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
z = torch.dist(x, y)
t = torch.FloatTensor([z])
assert torch.equal(t, torch.FloatTensor([0.0]))
x = torch.FloatTensor([1, 2, 3])
y = torch.FloatTensor([1, 2, 3])
z = torch.dot(x, y)
t = torch.FloatTensor([z])
assert torch.equal(t, torch.FloatTensor([14]))
z = torch.eq(x, y)
assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
z = torch.ge(x, y)
assert torch.equal(z, torch.ByteTensor([1, 1, 1]))
def test_remote_var_unary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5])).send(bob)
assert torch.equal(
x.abs().get(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
)
assert torch.equal(
x.abs_().get(), sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5]))
)
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5])).send(bob)
assert torch.equal(
x.cos().int().get(), sy.Variable(torch.IntTensor([0, 0, 0, 0, 0]))
)
assert torch.equal(
x.cos_().int().get(), sy.Variable(torch.IntTensor([0, 0, 0, 0, 0]))
)
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5])).send(bob)
assert torch.equal(
x.ceil().get(), sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
)
assert torch.equal(
x.ceil_().get(), sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
)
x = sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5])).send(bob)
assert torch.equal(
x.cpu().get(), sy.Variable(torch.FloatTensor([1, 2, -3, 4, 5]))
)
def test_remote_var_binary_methods(self):
"""Unit tests for methods mentioned on issue 1385
https://github.com/OpenMined/PySyft/issues/1385."""
x = sy.Variable(torch.FloatTensor([1, 2, 3, 4])).send(bob)
y = sy.Variable(torch.FloatTensor([[1, 2, 3, 4]])).send(bob)
z = torch.matmul(x, y.t())
assert torch.equal(z.get(), sy.Variable(torch.FloatTensor([30])))
z = torch.add(x, y)
assert torch.equal(z.get(), sy.Variable(torch.FloatTensor([[2, 4, 6, 8]])))
x = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])).send(bob)
y = sy.Variable(torch.FloatTensor([1, 2, 3, 4, 5])).send(bob)
assert torch.equal(
x.add_(y).get(), sy.Variable(torch.FloatTensor([2, 4, 6, 8, 10]))
)
x = sy.Variable(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(bob)
y = sy.Variable(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(bob)
z = torch.cross(x, y, dim=1)
assert torch.equal(
z.get(), sy.Variable(torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))
)
x = sy.Variable(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(bob)
y = sy.Variable(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(bob)
z = torch.dist(x, y)
assert torch.equal(z.get(), sy.Variable(torch.FloatTensor([0.0])))
x = sy.Variable(torch.FloatTensor([1, 2, 3])).send(bob)
y = sy.Variable(torch.FloatTensor([1, 2, 3])).send(bob)
z = torch.dot(x, y)
assert torch.equal(z.get(), sy.Variable(torch.FloatTensor([14])))
z = torch.eq(x, y)
assert torch.equal(z.get(), sy.Variable(torch.ByteTensor([1, 1, 1])))
z = torch.ge(x, y)
assert torch.equal(z.get(), sy.Variable(torch.ByteTensor([1, 1, 1])))
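# _SNNTensor layers secure-comparison support on top of the shared tensors, so
# non-linear operations such as relu and argmax can be evaluated on secret-shared
# values without revealing them; the tests below check the decoded results.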
class TestSNNTensor(TestCase):
def test_mpc_relu(self):
a = (torch.LongTensor([-1, 3, -5, 7])).share(alice, bob)
b = a.relu()
assert (b.get() == torch.LongTensor([0, 3, 0, 7])).all()
def test_mpc_argmax(self):
x = (
(torch.FloatTensor([[0.1, 0.2, 0.4, 0.3], [0.9, 0, 0, 0.1]]))
.fix_precision()
.share(alice, bob)
)
out = x.argmax()
assert (
out.get().decode() == torch.FloatTensor([[0, 0, 1, 0], [1, 0, 0, 0]])
).all()
def test_mpc_train(self):
# create our dataset
data = sy.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
target = sy.FloatTensor([[0], [0], [1], [1]])
model = sy.zeros(2, 1)
data = data.fix_precision().share(alice, bob)
target = target.fix_precision().share(alice, bob)
model = model.fix_precision().share(alice, bob)
for i in range(10):
pred = data.mm(model)
grad = pred - target
update = data.transpose(0, 1).mm(grad)
model = model - update * 0.1
loss = grad.get().decode().abs().sum()
assert loss < 0.8
def test_mpc_scalar_mult(self):
data = torch.FloatTensor([1, 2, 3]).fix_precision().share(alice, bob)
assert ((data * 0.1).get().decode() == torch.FloatTensor([0.1, 0.2, 0.3])).all()
assert (
(data * -0.1).get().decode() == torch.FloatTensor([-0.1, -0.2, -0.3])
).all()
assert ((data * 1.1).get().decode() == torch.FloatTensor([1.1, 2.2, 3.3])).all()
class TestSPDZTensor(TestCase):
def mpc_sum(self, n1, n2):
x = torch.LongTensor([n1])
y = torch.LongTensor([n2])
x = x.share(alice, bob)
y = y.share(alice, bob)
z = x + y
z = z.get()
assert torch.eq(z, torch.LongTensor([n1 + n2])).all()
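# share(alice, bob) splits the value into additive secret shares, one per worker,
# that sum back to the original value; addition is then performed share-wise on
# each worker and get() reassembles the plaintext result.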
def mpc_var_sum(self, n1, n2):
x = sy.Variable(torch.LongTensor([n1]))
y = sy.Variable(torch.LongTensor([n2]))
x = x.share(alice, bob)
y = y.share(alice, bob)
z = x + y
z = z.get()
z_ = sy.Variable(torch.LongTensor([n1 + n2]))
assert torch.native_eq(z, z_).all()
def test_mpc_sum(self):
self.mpc_sum(3, 5)
self.mpc_sum(4, 0)
self.mpc_sum(5, -5)
self.mpc_sum(3, -5)
self.mpc_sum(2 ** 24, 2 ** 12)
def test_mpc_var_sum(self):
self.mpc_var_sum(3, 5)
self.mpc_var_sum(4, 0)
self.mpc_var_sum(5, -5)
self.mpc_var_sum(3, -5)
self.mpc_var_sum(2 ** 24, 2 ** 12)
def mpc_mul(self, n1, n2):
x = torch.LongTensor([n1])
y = torch.LongTensor([n2])
x = x.share(alice, bob)
y = y.share(alice, bob)
z = x * y
z = z.get()
assert torch.eq(z, torch.LongTensor([n1 * n2])).all(), (
z,
"should be",
torch.LongTensor([n1 * n2]),
)
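# Unlike addition, multiplying two secret-shared values is interactive: SPDZ-style
# protocols rely on precomputed multiplication (Beaver) triples, so each shared
# multiplication involves extra communication between the share holders.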
def mpc_var_mul(self, n1, n2):
x = sy.Variable(torch.LongTensor([n1]))
y = sy.Variable(torch.LongTensor([n2]))
x = x.share(alice, bob)
y = y.share(alice, bob)
z = x * y
z = z.get()
z_ = sy.Variable(torch.LongTensor([n1 * n2]))
assert torch.native_eq(z, z_).all()
def test_mpc_mul(self):
self.mpc_mul(3, 5)
self.mpc_mul(4, 0)
self.mpc_mul(5, -5)
self.mpc_mul(3, 5)
self.mpc_mul(2 ** 12, 2 ** 12)
def test_mpc_var_mul(self):
self.mpc_var_mul(3, 5)
self.mpc_var_mul(4, 0)
self.mpc_var_mul(5, -5)
self.mpc_var_mul(3, 5)
self.mpc_var_mul(2 ** 12, 2 ** 12)
def test_mpc_scalar_mult(self):
x = torch.LongTensor([[-1, 2], [3, 4]])
x = x.share(bob, alice)
y = torch.LongTensor([[2, 2], [2, 2]]).send(bob, alice)
z = x * y
assert (z.get() == torch.LongTensor([[-2, 4], [6, 8]])).all()
x = torch.LongTensor([[-1, 2], [3, 4]])
x = x.share(bob, alice)
z = x * 2
assert (z.get() == torch.LongTensor([[-2, 4], [6, 8]])).all()
def test_spdz_matmul(self):
x = torch.LongTensor([[1, 2], [3, 4]])
y = torch.LongTensor([[5, 6], [7, 8]])
x = x.share(bob, alice)
y = y.share(bob, alice)
assert (x.mm(y).get() - torch.LongTensor([[18, 22], [43, 49]])).abs().sum() < 5
x = torch.LongTensor([[1, -2], [3, -4]])
y = torch.LongTensor([[5, 6], [7, 8]])
target = x.mm(y)
x = x.share(bob, alice)
y = y.share(bob, alice)
result = x.mm(y)
assert (result.get() - target).abs().sum() < 5
def test_spdz_negation_and_subtraction(self):
x = torch.LongTensor([[1, 2], [-3, -4]])
x = x.share(bob, alice)
z = -x
assert (z.get() == torch.LongTensor([[-1, -2], [3, 4]])).all()
x = torch.LongTensor([[1, -2], [-3, -4]])
y = torch.LongTensor([[5, 6], [7, 8]])
x = x.share(bob, alice)
y = y.share(bob, alice)
z = x - y
assert (z.get() == torch.LongTensor([[-4, -8], [-10, -12]])).all()
def test_spdz_mul_3_workers(self):
n1, n2 = (3, -5)
x = torch.LongTensor([n1])
y = torch.LongTensor([n2])
x = x.share(alice, bob, james)
y = y.share(alice, bob, james)
z = x * y
z = z.get()
assert (z == torch.LongTensor([n1 * n2])).all(), (
z,
"should be",
torch.LongTensor([n1 * n2]),
)
def test_share(self):
x = torch.LongTensor([-3])
spdz_x = x.share(alice, bob, james)
assert len(spdz_x.child.shares.child.pointer_tensor_dict.keys()) == 3
spdz_x.get()
assert sy.eq(spdz_x, sy.LongTensor([-3])).all()
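# fix_precision() encodes a float tensor as integers scaled by
# 10 ** precision_fractional and stores them in a LongTensor (see the
# display_chain.tensor.fixp_local string above); decode()/decode_() divide the
# scale back out, which is what the round-trips below verify.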
def test_fix_precision_decode(self):
x = torch.FloatTensor([0.1, 0.2, 0.1, 0.2])
x = x.fix_precision()
assert (
torch_utils.chain_print(x, display=False) == display_chain.tensor.fixp_local
)
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.local
x = x.fix_precision()
z = x + x
z = z.decode()
assert torch.eq(z, torch.FloatTensor([0.2, 0.4, 0.2, 0.4])).all()
z = x + x
z.decode_()
assert torch.eq(z, torch.FloatTensor([0.2, 0.4, 0.2, 0.4])).all()
x = x.decode()
x = x.fix_precision()
assert (
torch_utils.chain_print(x, display=False) == display_chain.tensor.fixp_local
)
x.decode_()
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.local
def test_fix_precision_mul(self):
x = torch.FloatTensor([1, 2, 0.4])
y = torch.FloatTensor([1, 1, 2])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=3)
z = x * y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([1, 2, 0.8])).all()
# with different precision fractions x's > y's
x = torch.FloatTensor([1, 2, 0.4])
y = torch.FloatTensor([1, 1, 2])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=4)
z = x * y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([1, 2, 0.8])).all()
# with different precision fractions x's < y's
x = torch.FloatTensor([1, 2, 0.4])
y = torch.FloatTensor([1, 1, 2])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=2)
z = x * y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([1, 2, 0.8])).all()
def test_fix_precision_add(self):
x = torch.FloatTensor([[1, 0.2], [0.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision()
y = y.fix_precision()
z = x + y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.8, 1.2], [1.9, 14]])).all()
# with different precision fractions x's > y's
x = torch.FloatTensor([[1, 0.2], [0.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision(precision_fractional=4)
y = y.fix_precision(precision_fractional=3)
z = x + y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.8, 1.2], [1.9, 14]])).all()
# with different precision fractions x's < y's
x = torch.FloatTensor([[1, 0.2], [0.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=4)
z = x + y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.8, 1.2], [1.9, 14]])).all()
def test_fix_precision_sub(self):
x = torch.FloatTensor([[1, 1.2], [1.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision()
y = y.fix_precision()
z = x - y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[0.2, 0.2], [0.9, 8]])).all()
# with different precision fractions x's > y's
x = torch.FloatTensor([[1, 1.2], [1.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision(precision_fractional=4)
y = y.fix_precision(precision_fractional=3)
z = x - y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[0.2, 0.2], [0.9, 8]])).all()
# with different precision fractions x's < y's
x = torch.FloatTensor([[1, 1.2], [1.9, 11]])
y = torch.FloatTensor([[0.8, 1], [1, 3]])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=4)
z = x - y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[0.2, 0.2], [0.9, 8]])).all()
def test_fix_precision_div(self):
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
y = torch.FloatTensor([[0.8, 0.4], [1, 3]])
x = x.fix_precision()
y = y.fix_precision()
z = x / y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.2500, 3], [1.9, 4]])).all()
# with different precision fractions x's > y's
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
y = torch.FloatTensor([[0.8, 0.4], [1, 3]])
x = x.fix_precision(precision_fractional=4)
y = y.fix_precision(precision_fractional=3)
z = x / y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.2000, 3], [1.9, 4]])).all()
# with different precision fractions x's < y's
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
y = torch.FloatTensor([[0.8, 0.4], [1, 3]])
x = x.fix_precision(precision_fractional=3)
y = y.fix_precision(precision_fractional=4)
z = x / y
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1.2500, 3], [1.9, 4]])).all()
def test_fix_precision_sum(self):
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
x = x.fix_precision(precision_fractional=4)
z = x.sum(0)
z = z.decode()
assert torch.eq(z, torch.FloatTensor([2, 13])).all()
def test_fix_precision_cumsum(self):
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
x = x.fix_precision(precision_fractional=4)
z = x.cumsum(0)
z = z.decode()
assert torch.eq(z, torch.FloatTensor([[1, 1], [2, 13]])).all()
def test_fix_precision_prod(self):
x = torch.FloatTensor([[1, 1.2], [1.9, 12]])
x = x.fix_precision(precision_fractional=4)
z = x.prod(0)
z = z.decode()
assert torch.eq(z, torch.FloatTensor([1, 14])).all()
def test_var_fix_precision_decode(self):
x = sy.Variable(torch.FloatTensor([0.1, 0.2, 0.1, 0.2]))
x = x.fix_precision()
assert torch_utils.chain_print(x, display=False) == display_chain.var.fixp_local
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.var.local
x = x.fix_precision()
z = x + x
z = z.decode()
assert torch.eq(z, sy.Variable(torch.FloatTensor([0.2, 0.4, 0.2, 0.4]))).all()
z = x + x
z.decode_()
assert torch.eq(z, sy.Variable(torch.FloatTensor([0.2, 0.4, 0.2, 0.4]))).all()
x = x.decode()
x = x.fix_precision()
assert torch_utils.chain_print(x, display=False) == display_chain.var.fixp_local
x.decode_()
assert torch_utils.chain_print(x, display=False) == display_chain.var.local
def test_remote_fix_precision(self):
x = torch.FloatTensor([0.1, 0.2, 0.1, 0.2])
x = x.send(bob).fix_precision()
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.pointer
x_ = bob.get_obj(x.id_at_location).parent
assert (
torch_utils.chain_print(x_, display=False)
== display_chain.tensor.fixp_local
)
z = x + x
z.get().decode_()
assert torch.eq(z, torch.FloatTensor([0.2, 0.4, 0.2, 0.4])).all()
x = x.get()
assert (
torch_utils.chain_print(x, display=False) == display_chain.tensor.fixp_local
)
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.local
def test_var_remote_fix_precision(self):
x = sy.Variable(torch.FloatTensor([0.1, 0.2, 0.1, 0.2]))
x = x.send(bob).fix_precision()
assert torch_utils.chain_print(x, display=False) == display_chain.var.pointer
x_ = bob.get_obj(x.id_at_location).parent
assert (
torch_utils.chain_print(x_, display=False) == display_chain.var.fixp_local
)
z = x + x
z.get().decode_()
assert torch.eq(z, sy.Variable(torch.FloatTensor([0.2, 0.4, 0.2, 0.4]))).all()
x = x.get()
assert torch_utils.chain_print(x, display=False) == display_chain.var.fixp_local
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.var.local
def test_fix_precision_share(self):
x = torch.FloatTensor([1.1, 2, 3])
x = x.fix_precision().share(alice, bob)
assert (
torch_utils.chain_print(x, display=False)
== display_chain.tensor.fixp_mpc_gpt
)
z = x + x
x = x.get()
assert (
torch_utils.chain_print(x, display=False) == display_chain.tensor.fixp_local
)
z = z.get().decode()
assert torch.eq(z, torch.FloatTensor([2.2, 4, 6])).all()
def test_var_fix_precision_share(self):
x = sy.Variable(torch.FloatTensor([1.1, 2, 3]))
x = x.fix_precision().share(alice, bob)
assert (
torch_utils.chain_print(x, display=False) == display_chain.var.fixp_mpc_gpt
)
z = x + x
x = x.get()
assert torch_utils.chain_print(x, display=False) == display_chain.var.fixp_local
z = z.get().decode()
assert torch.eq(z, sy.Variable(torch.FloatTensor([2.2, 4, 6]))).all()
def test_remote_fix_precision_share(self):
x = torch.FloatTensor([1.1, 2, 3])
x = x.send(bob).fix_precision().share(alice, bob)
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.pointer
x_ = bob.get_obj(x.id_at_location).parent
assert (
torch_utils.chain_print(x_, display=False)
== display_chain.tensor.fixp_mpc_gpt
)
z = x + x
x = x.get()
assert (
torch_utils.chain_print(x, display=False)
== display_chain.tensor.fixp_mpc_gpt
)
x = x.get()
assert (
torch_utils.chain_print(x, display=False) == display_chain.tensor.fixp_local
)
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.tensor.local
z = z.get().get().decode()
assert torch.eq(z, torch.FloatTensor([2.2, 4, 6])).all()
def test_var_remote_fix_precision_share(self):
x = sy.Variable(torch.FloatTensor([1.1, 2, 3]))
x = x.send(bob).fix_precision().share(alice, bob)
assert torch_utils.chain_print(x, display=False) == display_chain.var.pointer
x_ = bob.get_obj(x.id_at_location).parent
assert (
torch_utils.chain_print(x_, display=False) == display_chain.var.fixp_mpc_gpt
)
z = x + x
x = x.get()
assert (
torch_utils.chain_print(x, display=False) == display_chain.var.fixp_mpc_gpt
)
x = x.get()
assert torch_utils.chain_print(x, display=False) == display_chain.var.fixp_local
x = x.decode()
assert torch_utils.chain_print(x, display=False) == display_chain.var.local
z = z.get().get().decode()
assert torch.eq(z, sy.Variable(torch.FloatTensor([2.2, 4, 6]))).all()
def fix_precision_operation(self, l1, l2, var=False, op="plus"):
if var:
x = sy.Variable(torch.FloatTensor(l1))
y = sy.Variable(torch.FloatTensor(l2))
else:
x = torch.FloatTensor(l1)
y = torch.FloatTensor(l2)
x = x.fix_precision()
y = y.fix_precision()
if op == "plus":
z = x + y
l_res = [e1 + e2 for e1, e2 in zip(l1, l2)]
elif op == "mul":
z = x * y
l_res = [e1 * e2 for e1, e2 in zip(l1, l2)]
elif op == "matmul":
z = x.mm(y)
l_res = np.dot(np.array(l1), np.array(l2)).tolist()
else:
raise ArithmeticError("Unknown operator")
z = z.decode()
if var:
assert torch.eq(z, sy.Variable(torch.FloatTensor(l_res))).all()
else:
assert torch.eq(z, torch.FloatTensor(l_res)).all()
def test_addition_fix_precision(self):
self.fix_precision_operation([3.3], [5.1])
self.fix_precision_operation([2.5, 3.2], [5.4, -1.1])
self.fix_precision_operation([-2.8, -3.9], [-1, -1])
self.fix_precision_operation([-2, 3.3], [-1.9, 1])
self.fix_precision_operation([-19000, 3.3], [-1.9, 17654])
def test_var_addition_fix_precision(self):
self.fix_precision_operation([3.3], [5.1], var=True)
self.fix_precision_operation([2.5, 3.2], [5.4, -1.1], var=True)
self.fix_precision_operation([-2.8, -3.9], [-1, -1], var=True)
self.fix_precision_operation([-2, 3.3], [-1.9, 1], var=True)
self.fix_precision_operation([-19000, 3.3], [-1.9, 17654], var=True)
def test_mult_fix_precision(self):
self.fix_precision_operation([3.3], [5.1], op="mul")
self.fix_precision_operation([2.5, 3.2], [5.4, -1.1], op="mul")
self.fix_precision_operation([-2.8, -3.9], [-1, -1], op="mul")
self.fix_precision_operation([-2, 3.3], [-1.9, 1], op="mul")
self.fix_precision_operation([-19000, 3.3], [-1.9, 17654], op="mul")
def test_var_mult_fix_precision(self):
self.fix_precision_operation([3.3], [5.1], var=True, op="mul")
self.fix_precision_operation([2.5, 3.2], [5.4, -1.1], var=True, op="mul")
self.fix_precision_operation([-2.8, -3.9], [-1, -1], var=True, op="mul")
self.fix_precision_operation([-2, 3.3], [-1.9, 1], var=True, op="mul")
self.fix_precision_operation([-19000, 3.3], [-1.9, 17654], var=True, op="mul")
def test_matmul_fix_precision(self):
self.fix_precision_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], op="matmul"
)
self.fix_precision_operation(
[[-3.3, -2.1], [1.1, 5.2]], [[1, 2], [3, -4.8]], op="matmul"
)
self.fix_precision_operation(
[[1.1, -2.1], [3.2, 8.1], [3.0, -7]],
[[-3.3, -2.1], [1.1, 5.2]],
op="matmul",
)
self.fix_precision_operation(
[[-40.2, -20.1], [100.7, 51.2]], [[14.1, 21], [30, -41.8]], op="matmul"
)
def test_var_matmul_fix_precision(self):
self.fix_precision_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], var=True, op="matmul"
)
self.fix_precision_operation(
[[-3.3, -2.1], [1.1, 5.2]], [[1, 2], [3, -4.8]], var=True, op="matmul"
)
self.fix_precision_operation(
[[1.1, -2.1], [3.2, 8.1], [3.0, -7]],
[[-3.3, -2.1], [1.1, 5.2]],
var=True,
op="matmul",
)
self.fix_precision_operation(
[[-40.2, -20.1], [100.7, 51.2]],
[[14.1, 21], [30, -41.8]],
var=True,
op="matmul",
)
def remote_fix_precision_operation(self, l1, l2, var=False, op="plus"):
if var:
x = sy.Variable(torch.FloatTensor(l1))
y = sy.Variable(torch.FloatTensor(l2))
else:
x = torch.FloatTensor(l1)
y = torch.FloatTensor(l2)
x = x.send(bob).fix_precision()
y = y.send(bob).fix_precision()
if op == "plus":
z = x + y
l_res = [e1 + e2 for e1, e2 in zip(l1, l2)]
elif op == "mul":
z = x * y
l_res = [e1 * e2 for e1, e2 in zip(l1, l2)]
elif op == "matmul":
z = x.mm(y)
l_res = np.dot(np.array(l1), np.array(l2)).tolist()
else:
raise ArithmeticError("Unknown operator")
z = z.get().decode()
if var:
assert torch.eq(z, sy.Variable(torch.FloatTensor(l_res))).all()
else:
assert torch.eq(z, torch.FloatTensor(l_res)).all()
def test_addition_remote_fix_precision(self):
self.remote_fix_precision_operation([3.3], [5.1])
self.remote_fix_precision_operation([2.5, 3.2], [5.4, -1.1])
self.remote_fix_precision_operation([-2.8, -3.9], [-1, -1])
self.remote_fix_precision_operation([-2, 3.3], [-1.9, 1])
self.remote_fix_precision_operation([-19000, 3.3], [-1.9, 17654])
def test_var_addition_remote_fix_precision(self):
self.remote_fix_precision_operation([3.3], [5.1], var=True)
self.remote_fix_precision_operation([2.5, 3.2], [5.4, -1.1], var=True)
self.remote_fix_precision_operation([-2.8, -3.9], [-1, -1], var=True)
self.remote_fix_precision_operation([-2, 3.3], [-1.9, 1], var=True)
self.remote_fix_precision_operation([-19000, 3.3], [-1.9, 17654], var=True)
def test_mult_remote_fix_precision(self):
self.remote_fix_precision_operation([3.3], [5.1], op="mul")
self.remote_fix_precision_operation([2.5, 3.2], [5.4, -1.1], op="mul")
self.remote_fix_precision_operation([-2.8, -3.9], [-1, -1], op="mul")
self.remote_fix_precision_operation([-2, 3.3], [-1.9, 1], op="mul")
self.remote_fix_precision_operation([-19000, 3.3], [-1.9, 17654], op="mul")
def test_var_mult_remote_fix_precision(self):
self.remote_fix_precision_operation([3.3], [5.1], var=True, op="mul")
self.remote_fix_precision_operation([2.5, 3.2], [5.4, -1.1], var=True, op="mul")
self.remote_fix_precision_operation([-2.8, -3.9], [-1, -1], var=True, op="mul")
self.remote_fix_precision_operation([-2, 3.3], [-1.9, 1], var=True, op="mul")
self.remote_fix_precision_operation(
[-19000, 3.3], [-1.9, 17654], var=True, op="mul"
)
def test_matmul_remote_fix_precision(self):
self.remote_fix_precision_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], op="matmul"
)
self.remote_fix_precision_operation(
[[-3.3, -2.1], [1.1, 5.2]], [[1, 2], [3, -4.8]], op="matmul"
)
self.remote_fix_precision_operation(
[[1.1, -2.1], [3.2, 8.1], [3.0, -7]],
[[-3.3, -2.1], [1.1, 5.2]],
op="matmul",
)
self.remote_fix_precision_operation(
[[-40.2, -20.1], [100.7, 51.2]], [[14.1, 21], [30, -41.8]], op="matmul"
)
def test_var_matmul_remote_fix_precision(self):
self.remote_fix_precision_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], var=True, op="matmul"
)
self.remote_fix_precision_operation(
[[-3.3, -2.1], [1.1, 5.2]], [[1, 2], [3, -4.8]], var=True, op="matmul"
)
self.remote_fix_precision_operation(
[[1.1, -2.1], [3.2, 8.1], [3.0, -7]],
[[-3.3, -2.1], [1.1, 5.2]],
var=True,
op="matmul",
)
self.remote_fix_precision_operation(
[[-40.2, -20.1], [100.7, 51.2]],
[[14.1, 21], [30, -41.8]],
var=True,
op="matmul",
)
def remote_fix_precision_share_operation(self, l1, l2, var=False, op="plus"):
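        # Same as remote_fix_precision_operation, but the fixed-precision tensors are
        # additionally secret-shared between alice and bob before the operation, so the
        # result has to be retrieved with two .get() calls before decoding.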
if var:
x = sy.Variable(torch.FloatTensor(l1))
y = sy.Variable(torch.FloatTensor(l2))
else:
x = torch.FloatTensor(l1)
y = torch.FloatTensor(l2)
x = x.send(bob).fix_precision().share(alice, bob)
y = y.send(bob).fix_precision().share(alice, bob)
if op == "plus":
z = x + y
l_res = [e1 + e2 for e1, e2 in zip(l1, l2)]
elif op == "mul":
z = x * y
l_res = [e1 * e2 for e1, e2 in zip(l1, l2)]
elif op == "matmul":
z = x.mm(y)
l_res = np.dot(np.array(l1), np.array(l2)).tolist()
else:
raise ArithmeticError("Unknown operator")
z = z.get().get().decode()
if var:
assert torch.eq(z, sy.Variable(torch.FloatTensor(l_res))).all()
else:
assert torch.eq(z, torch.FloatTensor(l_res)).all()
def test_addition_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation([3.3], [5.1])
self.remote_fix_precision_share_operation([2.5, 3.2], [5.4, -1.1])
self.remote_fix_precision_share_operation([-2.8, -3.9], [-1, -1])
self.remote_fix_precision_share_operation([-2, 3.3], [-1.9, 1])
self.remote_fix_precision_share_operation([-190, 3.3], [-1.9, 174])
def test_var_addition_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation([3.3], [5.1], var=True)
self.remote_fix_precision_share_operation([2.5, 3.2], [5.4, -1.1], var=True)
self.remote_fix_precision_share_operation([-2.8, -3.9], [-1, -1], var=True)
self.remote_fix_precision_share_operation([-2, 3.3], [-1.9, 1], var=True)
self.remote_fix_precision_share_operation([-190, 3.3], [-1.9, 174], var=True)
def test_mult_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation([3.3], [5.1], op="mul")
self.remote_fix_precision_share_operation([2.5, 3.2], [5.4, -1.1], op="mul")
self.remote_fix_precision_share_operation([-2.8, -3.9], [-1, -1], op="mul")
self.remote_fix_precision_share_operation([-2, 3.3], [-1.9, 1], op="mul")
# available precision too small for this at the moment
# self.remote_fix_precision_share_operation([-190, 3.3], [-1.9, 174], op='mul')
def test_var_mult_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation([3.3], [5.1], var=True, op="mul")
self.remote_fix_precision_share_operation(
[2.5, 3.2], [5.4, -1.1], var=True, op="mul"
)
self.remote_fix_precision_share_operation(
[-2.8, -3.9], [-1, -1], var=True, op="mul"
)
self.remote_fix_precision_share_operation(
[-2, 3.3], [-1.9, 1], var=True, op="mul"
)
# available precision too small for this at the moment
# self.remote_fix_precision_share_operation([-190, 3.3], [-1.9, 174], var=True, op='mul')
def test_matmul_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], op="matmul"
)
self.remote_fix_precision_share_operation(
[[-3.3, -2.1], [1.1, 5.2]], [[1, 2], [3, -4.8]], op="matmul"
)
self.remote_fix_precision_share_operation(
[[1.1, -2.1], [3.2, 8.1], [3.0, -7]],
[[-3.3, -2.1], [1.1, 5.2]],
op="matmul",
)
# available precision too small for this at the moment
# self.remote_fix_precision_share_operation([[-40.2, -20.1],
# [10.7, 21.2]],
# [[14.1, 21],
# [10, -11.8]], op='matmul')
def test_var_matmul_remote_fix_precision_share(self):
self.remote_fix_precision_share_operation(
[[3.3, 2.1], [1.1, 5.2]], [[1, 2], [3, 4]], var=True, op="matmul"
)
# self.remote_fix_precision_share_operation([[-3.3, -2.1],
# [1.1, 5.2]],
# [[1, 2],
# [3, -4.8]], var=True, op='matmul')
# self.remote_fix_precision_share_operation([[1.1, -2.1],
# [3.2, 8.1],
# [3.0, -7]],
# [[-3.3, -2.1],
# [1.1, 5.2]], var=True, op='matmul')
# available precision too small for this at the moment
# self.remote_fix_precision_share_operation([[-40.2, -20.1],
# [10.7, 21.2]],
# [[14.1, 21],
# [10, -11.8]], op='matmul')
class TestGPCTensor(TestCase):
def test_gpc_add(self):
x = torch.LongTensor([1, 2, 3, 4, 5])
y = torch.LongTensor([1, 2, 3, 4, 5])
x.send(bob)
y.send(alice)
x_pointer_tensor_dict = {alice: y.child, bob: x.child}
x_gp = _GeneralizedPointerTensor(
x_pointer_tensor_dict, torch_type="syft.LongTensor"
).wrap(True)
y = x_gp + x_gp
results = y.get()
assert (results[0] == (x.get() * 2)).all()
def test_gpc_unwrapped_add(self):
x = torch.LongTensor([1, 2, 3, 4, 5])
y = torch.LongTensor([1, 2, 3, 4, 5])
x.send(bob)
y.send(alice)
x_pointer_tensor_dict = {alice: y.child, bob: x.child}
x_gp = _GeneralizedPointerTensor(
x_pointer_tensor_dict, torch_type="syft.LongTensor"
).wrap(True)
y = x_gp.child + x_gp.child
results = y.get()
assert (results[0] == (x.get() * 2)).all()
def test_gpc_workers(self):
x = torch.LongTensor([1, 2, 3, 4, 5])
y = torch.LongTensor([1, 2, 3, 4, 5])
x.send(bob)
y.send(alice)
x_pointer_tensor_dict = {alice: y.child, bob: x.child}
x_gp = _GeneralizedPointerTensor(x_pointer_tensor_dict)
results = x_gp.workers()
assert results == [k.id for k in x_pointer_tensor_dict.keys()]
if __name__ == "__main__":
unittest.main()
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.ones",
"torch.LongTensor",
"torch.IntTensor",
"torch.FloatTensor",
"torch.ByteTensor",
"torch.manual_seed",
"torch.nn.functional.relu",
"torch.ge",
"torch.nn.functional.conv2d",
"torch.equal",
"torch.zeros",
"torch.max",
"torch.clamp",
"torch.nn.Conv2d",
"torch.cross",
"torch.matmul",
"torch.addmm",
"torch.addcmul",
"torch.eq",
"torch.native_eq",
"torch.add",
"torch.dot",
"torch.dist"
] | 0.3.1 | jjmachan/PySyft | 41a525443881bfd94ccb488d7a24765c1778ac05 |
1.7 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
import math
from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
from yolox.utils import bboxes_iou, meshgrid
from .losses import IOUloss
from .network_blocks import BaseConv, DWConv
class YOLOXHead(nn.Module):
def __init__(
self,
num_classes,
width=1.0,
strides=[8, 16, 32],
in_channels=[256, 512, 1024],
act="silu",
depthwise=False,
):
"""
Args:
            act (str): activation type of conv. Default value: "silu".
            depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.cls_preds = nn.ModuleList()
self.reg_preds = nn.ModuleList()
self.obj_preds = nn.ModuleList()
self.stems = nn.ModuleList()
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
nn.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
nn.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.l1_loss = nn.L1Loss(reduction="none")
self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction="none")
self.iou_loss = IOUloss(reduction="none")
self.strides = strides
self.grids = [torch.zeros(1)] * len(in_channels)
def initialize_biases(self, prior_prob):
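        # Initialize the classification and objectness biases so that the initial sigmoid
        # output equals prior_prob (focal-loss style prior: b = -log((1 - p) / p)).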
for conv in self.cls_preds:
b = conv.bias.view(self.n_anchors, -1)
b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
for conv in self.obj_preds:
b = conv.bias.view(self.n_anchors, -1)
b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = torch.cat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(
output, k, stride_this_level, xin[0].type()
)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(
torch.zeros(1, grid.shape[1])
.fill_(stride_this_level)
.type_as(xin[0])
)
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.view(
batch_size, self.n_anchors, 4, hsize, wsize
)
reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(
batch_size, -1, 4
)
origin_preds.append(reg_output.clone())
else:
output = torch.cat(
[reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1
)
outputs.append(output)
if self.training:
return self.get_losses(
imgs,
x_shifts,
y_shifts,
expanded_strides,
labels,
torch.cat(outputs, 1),
origin_preds,
dtype=xin[0].dtype,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = torch.cat(
[x.flatten(start_dim=2) for x in outputs], dim=2
).permute(0, 2, 1)
if self.decode_in_inference:
return self.decode_outputs(outputs, dtype=xin[0].type())
else:
return outputs
def get_output_and_grid(self, output, k, stride, dtype):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])
grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype)
self.grids[k] = grid
output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = output.permute(0, 1, 3, 4, 2).reshape(
batch_size, self.n_anchors * hsize * wsize, -1
)
grid = grid.view(1, -1, 2)
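        # Decode boxes: centers are predicted as offsets from the grid cell and scaled by the
        # stride; widths/heights are predicted in log-space and exponentiated back to pixels.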
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs, dtype):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
yv, xv = meshgrid([torch.arange(hsize), torch.arange(wsize)])
grid = torch.stack((xv, yv), 2).view(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(torch.full((*shape, 1), stride))
grids = torch.cat(grids, dim=1).type(dtype)
strides = torch.cat(strides, dim=1).type(dtype)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
return outputs
def get_losses(
self,
imgs,
x_shifts,
y_shifts,
expanded_strides,
labels,
outputs,
origin_preds,
dtype,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = outputs[:, :, 4].unsqueeze(-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]
y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]
expanded_strides = torch.cat(expanded_strides, 1)
if self.use_l1:
origin_preds = torch.cat(origin_preds, 1)
cls_targets = []
reg_targets = []
l1_targets = []
obj_targets = []
fg_masks = []
num_fg = 0.0
num_gts = 0.0
for batch_idx in range(outputs.shape[0]):
num_gt = int(nlabel[batch_idx])
num_gts += num_gt
if num_gt == 0:
cls_target = outputs.new_zeros((0, self.num_classes))
reg_target = outputs.new_zeros((0, 4))
l1_target = outputs.new_zeros((0, 4))
obj_target = outputs.new_zeros((total_num_anchors, 1))
fg_mask = outputs.new_zeros(total_num_anchors).bool()
else:
gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
gt_classes = labels[batch_idx, :num_gt, 0]
bboxes_preds_per_image = bbox_preds[batch_idx]
try:
(
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg_img,
) = self.get_assignments( # noqa
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
)
except RuntimeError as e:
# TODO: the string might change, consider a better way
if "CUDA out of memory. " not in str(e):
                        raise  # the RuntimeError might not be caused by CUDA OOM
logger.error(
"OOM RuntimeError is raised due to the huge memory cost during label assignment. \
CPU mode is applied in this batch. If you want to avoid this issue, \
try to reduce the batch size or image size."
)
torch.cuda.empty_cache()
(
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg_img,
) = self.get_assignments( # noqa
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
"cpu",
)
torch.cuda.empty_cache()
num_fg += num_fg_img
cls_target = F.one_hot(
gt_matched_classes.to(torch.int64), self.num_classes
) * pred_ious_this_matching.unsqueeze(-1)
obj_target = fg_mask.unsqueeze(-1)
reg_target = gt_bboxes_per_image[matched_gt_inds]
if self.use_l1:
l1_target = self.get_l1_target(
outputs.new_zeros((num_fg_img, 4)),
gt_bboxes_per_image[matched_gt_inds],
expanded_strides[0][fg_mask],
x_shifts=x_shifts[0][fg_mask],
y_shifts=y_shifts[0][fg_mask],
)
cls_targets.append(cls_target)
reg_targets.append(reg_target)
obj_targets.append(obj_target.to(dtype))
fg_masks.append(fg_mask)
if self.use_l1:
l1_targets.append(l1_target)
cls_targets = torch.cat(cls_targets, 0)
reg_targets = torch.cat(reg_targets, 0)
obj_targets = torch.cat(obj_targets, 0)
fg_masks = torch.cat(fg_masks, 0)
if self.use_l1:
l1_targets = torch.cat(l1_targets, 0)
num_fg = max(num_fg, 1)
loss_iou = (
self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets)
).sum() / num_fg
loss_obj = (
self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)
).sum() / num_fg
loss_cls = (
self.bcewithlog_loss(
cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets
)
).sum() / num_fg
if self.use_l1:
loss_l1 = (
self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)
).sum() / num_fg
else:
loss_l1 = 0.0
reg_weight = 5.0
loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
return (
loss,
reg_weight * loss_iou,
loss_obj,
loss_cls,
loss_l1,
num_fg / max(num_gts, 1),
)
def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):
l1_target[:, 0] = gt[:, 0] / stride - x_shifts
l1_target[:, 1] = gt[:, 1] / stride - y_shifts
l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
return l1_target
@torch.no_grad()
def get_assignments(
self,
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
mode="gpu",
):
if mode == "cpu":
print("------------CPU Mode for This Batch-------------")
gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
gt_classes = gt_classes.cpu().float()
expanded_strides = expanded_strides.cpu().float()
x_shifts = x_shifts.cpu()
y_shifts = y_shifts.cpu()
fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
gt_bboxes_per_image,
expanded_strides,
x_shifts,
y_shifts,
total_num_anchors,
num_gt,
)
bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
cls_preds_ = cls_preds[batch_idx][fg_mask]
obj_preds_ = obj_preds[batch_idx][fg_mask]
num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
if mode == "cpu":
gt_bboxes_per_image = gt_bboxes_per_image.cpu()
bboxes_preds_per_image = bboxes_preds_per_image.cpu()
pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False)
gt_cls_per_image = (
F.one_hot(gt_classes.to(torch.int64), self.num_classes)
.float()
.unsqueeze(1)
.repeat(1, num_in_boxes_anchor, 1)
)
pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
if mode == "cpu":
cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
with torch.cuda.amp.autocast(enabled=False):
cls_preds_ = (
cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
* obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
)
pair_wise_cls_loss = F.binary_cross_entropy(
cls_preds_.sqrt_(), gt_cls_per_image, reduction="none"
).sum(-1)
del cls_preds_
cost = (
pair_wise_cls_loss
+ 3.0 * pair_wise_ious_loss
+ 100000.0 * (~is_in_boxes_and_center)
)
(
num_fg,
gt_matched_classes,
pred_ious_this_matching,
matched_gt_inds,
) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)
del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
if mode == "cpu":
gt_matched_classes = gt_matched_classes.cpu()
fg_mask = fg_mask.cpu()
pred_ious_this_matching = pred_ious_this_matching.cpu()
matched_gt_inds = matched_gt_inds.cpu()
return (
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg,
)
def get_in_boxes_info(
self,
gt_bboxes_per_image,
expanded_strides,
x_shifts,
y_shifts,
total_num_anchors,
num_gt,
):
expanded_strides_per_image = expanded_strides[0]
x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
x_centers_per_image = (
(x_shifts_per_image + 0.5 * expanded_strides_per_image)
.unsqueeze(0)
.repeat(num_gt, 1)
) # [n_anchor] -> [n_gt, n_anchor]
y_centers_per_image = (
(y_shifts_per_image + 0.5 * expanded_strides_per_image)
.unsqueeze(0)
.repeat(num_gt, 1)
)
gt_bboxes_per_image_l = (
(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_r = (
(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_t = (
(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_b = (
(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
b_l = x_centers_per_image - gt_bboxes_per_image_l
b_r = gt_bboxes_per_image_r - x_centers_per_image
b_t = y_centers_per_image - gt_bboxes_per_image_t
b_b = gt_bboxes_per_image_b - y_centers_per_image
bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
# in fixed center
center_radius = 2.5
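        # Center sampling: an anchor is also a candidate if its center falls within
        # center_radius * stride of the GT box center along both axes.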
gt_bboxes_per_image_l = (gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
1, total_num_anchors
) - center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_r = (gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
1, total_num_anchors
) + center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_t = (gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
1, total_num_anchors
) - center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_b = (gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
1, total_num_anchors
) + center_radius * expanded_strides_per_image.unsqueeze(0)
c_l = x_centers_per_image - gt_bboxes_per_image_l
c_r = gt_bboxes_per_image_r - x_centers_per_image
c_t = y_centers_per_image - gt_bboxes_per_image_t
c_b = gt_bboxes_per_image_b - y_centers_per_image
center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
is_in_centers = center_deltas.min(dim=-1).values > 0.0
is_in_centers_all = is_in_centers.sum(dim=0) > 0
# in boxes and in centers
is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
is_in_boxes_and_center = (
is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
)
return is_in_boxes_anchor, is_in_boxes_and_center
def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):
# Dynamic K
# ---------------------------------------------------------------
matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
ious_in_boxes_matrix = pair_wise_ious
n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
dynamic_ks = dynamic_ks.tolist()
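        # dynamic_ks[i] is the number of anchors to assign to GT i, estimated as the integer
        # (clamped to >= 1) sum of its top-10 IoUs (the SimOTA heuristic); the loop below
        # picks that many lowest-cost anchors for each GT.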
for gt_idx in range(num_gt):
_, pos_idx = torch.topk(
cost[gt_idx], k=dynamic_ks[gt_idx], largest=False
)
matching_matrix[gt_idx][pos_idx] = 1
del topk_ious, dynamic_ks, pos_idx
anchor_matching_gt = matching_matrix.sum(0)
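        # Resolve conflicts: if an anchor was matched to more than one GT, keep only the
        # GT with the minimum assignment cost for that anchor.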
if (anchor_matching_gt > 1).sum() > 0:
_, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
matching_matrix[:, anchor_matching_gt > 1] *= 0
matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
fg_mask_inboxes = matching_matrix.sum(0) > 0
num_fg = fg_mask_inboxes.sum().item()
fg_mask[fg_mask.clone()] = fg_mask_inboxes
matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
gt_matched_classes = gt_classes[matched_gt_inds]
pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[
fg_mask_inboxes
]
return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
| [
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.cuda.amp.autocast",
"torch.nn.ModuleList",
"torch.min",
"torch.arange",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.cuda.empty_cache",
"torch.full",
"torch.nn.BCEWithLogitsLoss",
"torch.zeros_like",
"torch.log",
"torch.exp",
"torch.topk"
] | 1.7 | zhanglirong1999/YOLOX | 8b96c9c954e773a68cb439506bedd3b80406cc7d |
1.7 | import os
from options.train_options import TrainOptions
from models import create_model
from util.visualizer import save_images
from util import html
from PIL import Image
import string
import torch
import torchvision
import torchvision.transforms as transforms
import coremltools as ct
from util import util
import numpy as np
opt = TrainOptions().gather_options()
opt.isTrain = True
opt.name = "siggraph_caffemodel"
opt.mask_cent = 0
# opt.name = "siggraph_retrained"
opt.gpu_ids = []
opt.load_model = True
opt.num_threads = 1 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.display_id = -1 # no visdom display
opt.phase = 'val'
opt.dataroot = './dataset/ilsvrc2012/%s/' % opt.phase
opt.serial_batches = True
opt.aspect_ratio = 1.
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))
) if opt.suffix != '' else ''
opt.name = opt.name + suffix
opt.A = 2 * opt.ab_max / opt.ab_quant + 1
opt.B = opt.A
class Colorization(torch.nn.Module):
def __init__(self):
super(Colorization, self).__init__()
model = create_model(opt)
model.setup(opt)
model.eval()
self.model = model
def forward(self, image, hint):
data = {
"A": image[:, 0:1, :, :],
"B": image[:, 1:3, :, :],
"hint_B": hint[:, 0:2, :, :],
"mask_B": hint[:, 2:3, :, :]
}
# with torch.no_grad():
self.model.set_input(data)
self.model.forward()
fake_reg = torch.cat((self.model.real_A, self.model.fake_B_reg), dim=1)
return fake_reg
image_path = "./large.JPG"
image = Image.open(image_path)
image = transforms.Compose([
transforms.Resize(512),
transforms.ToTensor(),
])(image)
image = image.view(1, *image.shape)
image = util.crop_mult(image, mult=8, HWmax=[4032, 4032])
transforms.ToPILImage()(image[0]).show(command='fim')
data = util.get_colorization_data(
[image], opt, ab_thresh=0., p=0.125)
img = torch.cat((data["A"], data["B"]), dim=1)
hint = torch.cat((data["hint_B"], data["mask_B"]), dim=1)
# print(data["mask_B"], data["hint_B"])
# data["hint_B"] = torch.zeros_like(data["hint_B"])
# data["mask_B"] = torch.zeros_like(data["mask_B"])
# model = Colorization()
with torch.no_grad():
model = Colorization()
model.eval()
for param in model.parameters():
param.requires_grad = False
model.model.set_requires_grad(model.model.netG)
# model(data)
# transforms.ToPILImage()(image[0]).show(command='fim')
# to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr',
# 'real', 'fake_reg', 'real_ab', 'fake_ab_reg', ]
# visuals = util.get_subset_dict(
# model.model.get_current_visuals(), to_visualize)
# for key, value in visuals.items():
# print(key)
# transforms.ToPILImage()(value[0]).show(command='fim')
output = model(img, hint)
output = util.lab2rgb(output, opt=opt)
transforms.ToPILImage()(output[0]).show(command='fim')
traced_model = torch.jit.trace(
model, (img, hint), check_trace=False)
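# Convert the traced model to Core ML with flexible spatial dimensions: RangeDim(1, 4096)
# marks height and width as dynamic, so the converted model accepts any size in that range.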
mlmodel = ct.convert(model=traced_model, inputs=[
ct.TensorType(name="image", shape=ct.Shape(
shape=(1, 3, ct.RangeDim(1, 4096), ct.RangeDim(1, 4096)))),
ct.TensorType(name="hint", shape=ct.Shape(
shape=(1, 3, ct.RangeDim(1, 4096), ct.RangeDim(1, 4096)))),
])
mlmodel.save("~/color.mlmodel")
| [
"torch.cat",
"torch.no_grad",
"torch.jit.trace"
] | 1.7.1 | zengxinzhy/colorization-pytorch | a41e61dfc0d99532728af3cbfd21efbdaf6086c5 |
1.7 | from abc import ABC, abstractmethod
from typing import Union, Sized, List, Tuple
from copy import deepcopy
import torch
from torch import nn as nn
from ..nn.linear import DenseLinear
from ..nn.conv2d import DenseConv2d
from .utils import collect_leaf_modules, is_parameterized
class BaseModel(nn.Module, ABC):
def __init__(self):
super(BaseModel, self).__init__()
self.prunable_layers: list = []
self.prunable_layer_prefixes: list = []
def clone_from_model(self, original_model: nn.Module = None):
# copying all submodules from original model
for name, module in original_model._modules.items():
self.add_module(name, deepcopy(module))
def collect_prunable_layers(self) -> None:
self.prunable_layers, self.prunable_layer_prefixes = self.find_layers(lambda x: is_parameterized(x))
def convert_eligible_layers(self):
# changing all conv2d and linear layers to customized ones
for module_name, old_module in zip(self.prunable_layer_prefixes, self.prunable_layers):
if isinstance(old_module, nn.Linear):
self.set_module_by_name(module_name, DenseLinear.from_linear(old_module))
elif isinstance(old_module, nn.Conv2d):
self.set_module_by_name(module_name, DenseConv2d.from_conv2d(old_module))
def find_layers(self, criterion) -> Tuple[List, List]:
layers, names = [], []
collect_leaf_modules(self, criterion, layers, names)
return layers, names
@abstractmethod
def forward(self, inputs) -> torch.Tensor:
pass
def prune_by_threshold(self, thr_arg: Union[int, float, Sized]):
prunable_layers = self.prunable_layers
if isinstance(thr_arg, Sized):
assert len(prunable_layers) == len(thr_arg)
else:
thr_arg = [thr_arg] * len(prunable_layers)
for thr, layer in zip(thr_arg, prunable_layers):
if thr is not None:
layer.prune_by_threshold(thr)
return self
def prune_by_rank(self, rank_arg: Union[int, float, Sized]):
prunable_layers = self.prunable_layers
if isinstance(rank_arg, Sized):
assert len(prunable_layers) == len(rank_arg)
else:
rank_arg = [rank_arg] * len(prunable_layers)
for rank, layer in zip(rank_arg, prunable_layers):
if rank is not None:
layer.prune_by_rank(rank)
return self
def prune_by_pct(self, pct_arg: Union[int, float, Sized]):
prunable_layers = self.prunable_layers
if isinstance(pct_arg, Sized):
assert len(prunable_layers) == len(pct_arg)
else:
pct_arg = [pct_arg] * len(prunable_layers)
for pct, layer in zip(pct_arg, prunable_layers):
if pct is not None:
layer.prune_by_pct(pct)
return self
def random_prune_by_pct(self, pct_arg: Union[int, float, Sized]):
prunable_layers = self.prunable_layers
if isinstance(pct_arg, Sized):
assert len(prunable_layers) == len(pct_arg)
else:
pct_arg = [pct_arg] * len(prunable_layers)
for pct, layer in zip(pct_arg, prunable_layers):
if pct is not None:
layer.random_prune_by_pct(pct)
return self
def calc_num_prunable_params(self, count_bias=True, display=False):
total_param_in_use = 0
total_param = 0
for layer, layer_prefix in zip(self.prunable_layers, self.prunable_layer_prefixes):
num_bias = layer.bias.nelement() if layer.bias is not None and count_bias else 0
num_weight = layer.num_weight
num_params_in_use = num_weight + num_bias
num_params = layer.weight.nelement() + num_bias
total_param_in_use += num_params_in_use
total_param += num_params
if display:
print("Layer name: {}. remaining/all: {}/{} = {}".format(layer_prefix, num_params_in_use, num_params,
num_params_in_use / num_params))
if display:
print("Total: remaining/all: {}/{} = {}".format(total_param_in_use, total_param,
total_param_in_use / total_param))
return total_param_in_use, total_param
def nnz(self, count_bias=True):
# number of parameters in use in prunable layers
return self.calc_num_prunable_params(count_bias=count_bias)[0]
def nelement(self, count_bias=True):
# number of all parameters in prunable layers
return self.calc_num_prunable_params(count_bias=count_bias)[1]
def density(self, count_bias=True):
total_param_in_use, total_param = self.calc_num_prunable_params(count_bias=count_bias)
return total_param_in_use / total_param
def _get_module_by_list(self, module_names: List):
module = self
for name in module_names:
module = getattr(module, name)
return module
def get_module_by_name(self, module_name: str):
return self._get_module_by_list(module_name.split('.'))
def set_module_by_name(self, module_name: str, new_module):
splits = module_name.split('.')
self._get_module_by_list(splits[:-1]).__setattr__(splits[-1], new_module)
def get_mask_by_name(self, param_name: str):
if param_name.endswith("bias"): # todo
return None
module = self._get_module_by_list(param_name.split('.')[:-1])
return module.mask if hasattr(module, "mask") else None
@torch.no_grad()
def reinit_from_model(self, final_model):
assert isinstance(final_model, self.__class__)
for self_layer, layer in zip(self.prunable_layers, final_model.prunable_layers):
self_layer.mask = layer.mask.clone().to(self_layer.mask.device)
def to_sparse(self):
self_copy = deepcopy(self)
for module_name, old_module in zip(self.prunable_layer_prefixes, self.prunable_layers):
self_copy.set_module_by_name(module_name, old_module.to_sparse())
self.collect_prunable_layers()
return self_copy
def to(self, *args, **kwargs):
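        # Parse the target device from the .to() arguments (via a private torch helper) so
        # the pruning masks can be moved to the same device as the module's parameters.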
device = torch._C._nn._parse_to(*args, **kwargs)[0]
if device is not None:
# move masks to device
for m in self.prunable_layers:
m.move_data(device)
return super(BaseModel, self).to(*args, **kwargs)
| [
"torch.no_grad",
"torch._C._nn._parse_to"
] | 1.7.1 | jiangyuang/ModelPruningLibrary | 9c8ba5a3c5d118f37768d5d42254711f48d88745 |
0.4 | from easydict import EasyDict as edict
from pathlib import Path
import torch
from torch.nn import CrossEntropyLoss
def get_config(training=True):
conf = edict()
conf.data_path = Path('models/data')
conf.work_path = Path('weights/')
conf.model_path = conf.work_path / 'models'
conf.log_path = conf.work_path / 'log'
conf.save_path = conf.work_path
conf.input_size = [112, 112]
conf.embedding_size = 512
conf.use_mobilfacenet = False
conf.net_depth = 50
conf.drop_ratio = 0.6
conf.net_mode = 'ir_se' # or 'ir'
conf.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
conf.data_mode = 'emore'
conf.vgg_folder = conf.data_path / 'faces_vgg_112x112'
conf.ms1m_folder = conf.data_path / 'faces_ms1m_112x112'
conf.emore_folder = conf.data_path / 'faces_emore'
conf.batch_size = 100 # irse net depth 50
# conf.batch_size = 200 # mobilefacenet
# --------------------Training Config ------------------------
if training:
conf.log_path = conf.work_path / 'log'
conf.save_path = conf.work_path / 'save'
# conf.weight_decay = 5e-4
conf.lr = 1e-3
conf.momentum = 0.9
conf.pin_memory = True
# conf.num_workers = 4 # when batchsize is 200
conf.num_workers = 3
conf.ce_loss = CrossEntropyLoss()
# --------------------Inference Config ------------------------
else:
conf.facebank_path = conf.data_path / 'facebank'
conf.threshold = 1.5
conf.face_limit = 10
        # at inference time, detect at most 10 faces per image (my laptop is slow)
return conf
| [
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss"
] | 0.4.0 | zakerifahimeh/FaceLib | bf8eadc26baf04907e3800ada02896ac7056080c |
1.4 | import torch
from torchvision import datasets, transforms
import numpy as np
from os.path import join
from .namers import attack_file_namer
def tiny_imagenet(args):
data_dir = join(args.directory, 'data')
train_dir = join(data_dir, "original_datasets",
"tiny-imagenet-200", "train")
test_dir = join(data_dir, "original_datasets",
"tiny-imagenet-200", "val")
transform_train = transforms.Compose(
[
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
transform_test = transforms.Compose([transforms.ToTensor()])
trainset = datasets.ImageFolder(train_dir, transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2
)
testset = datasets.ImageFolder(test_dir, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2
)
return train_loader, test_loader
def tiny_imagenet_from_file(args):
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
# Read
if args.adv_testing.method == "transfer":
filepath = join(
args.directory, 'data',
'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file
)
else:
filepath = attack_file_namer(args)
test_images = np.load(filepath)
data_dir = join(args.directory, 'data')
test_dir = join(data_dir, "original_datasets",
"tiny-imagenet-200", "val")
transform_test = transforms.Compose([transforms.ToTensor()])
testset = datasets.ImageFolder(test_dir, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2
)
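    # Wrap the precomputed attacked images in a TensorDataset: values are scaled by their
    # global max (so they lie in [0, 1]) and paired with the clean test set's labels.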
tensor_x = torch.Tensor(test_images / np.max(test_images))
tensor_y = torch.Tensor(test_loader.dataset.targets).long()
tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)
attack_loader = torch.utils.data.DataLoader(
tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs
)
return attack_loader
def imagenette(args):
data_dir = join(args.directory, 'data')
train_dir = join(data_dir, "original_datasets",
"imagenette2-160", "train")
test_dir = join(data_dir, "original_datasets",
"imagenette2-160", "val")
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 4, "pin_memory": True} if use_cuda else {}
transform_train = transforms.Compose(
[
transforms.RandomCrop((160), padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
transform_test = transforms.Compose(
[transforms.CenterCrop(160), transforms.ToTensor()]
)
trainset = datasets.ImageFolder(train_dir, transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2
)
testset = datasets.ImageFolder(test_dir, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2
)
return train_loader, test_loader
def imagenette_from_file(args):
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
# Read
if args.adv_testing.method == "transfer":
filepath = join(
args.directory, 'data', 'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file
)
else:
filepath = attack_file_namer(args)
test_images = np.load(filepath)
data_dir = join(args.directory, 'data')
test_dir = join(data_dir, "original_datasets",
"imagenette2-160", "val")
transform_test = transforms.Compose([transforms.ToTensor()])
testset = datasets.ImageFolder(test_dir, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2
)
tensor_x = torch.Tensor(test_images / np.max(test_images))
tensor_y = torch.Tensor(test_loader.dataset.targets).long()
tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)
attack_loader = torch.utils.data.DataLoader(
tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs
)
return attack_loader
def cifar10(args):
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 4, "pin_memory": True} if use_cuda else {}
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
transform_test = transforms.Compose([transforms.ToTensor()])
trainset = datasets.CIFAR10(
root=join(args.directory, 'data', 'original_datasets'),
train=True,
download=True,
transform=transform_train,
)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2
)
testset = datasets.CIFAR10(
root=join(args.directory, 'data', 'original_datasets'),
train=False,
download=True,
transform=transform_test,
)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2
)
return train_loader, test_loader
def cifar10_from_file(args):
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
# Read
if args.adv_testing.method == "transfer":
filepath = join(
args.directory, 'data', 'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file
)
else:
filepath = attack_file_namer(args)
test_images = np.load(filepath)
cifar10 = datasets.CIFAR10(
join(args.directory, 'data', 'original_datasets'),
train=False,
transform=None,
target_transform=None,
download=False,
)
tensor_x = torch.Tensor(test_images / np.max(test_images))
tensor_y = torch.Tensor(cifar10.targets).long()
tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)
attack_loader = torch.utils.data.DataLoader(
tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs
)
return attack_loader
def imagenet(args):
data_dir = join(args.directory, 'data')
train_dir = join(data_dir, "original_datasets", "imagenet", "train")
test_dir = join(data_dir, "original_datasets", "imagenet", "val")
use_cuda = args.use_gpu and torch.cuda.is_available()
kwargs = {"num_workers": 4, "pin_memory": True} if use_cuda else {}
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transform_train = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
transform_test = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
trainset = datasets.ImageFolder(train_dir, transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset,
batch_size=args.neural_net.train_batch_size,
shuffle=True,
num_workers=4,
pin_memory=True,
)
testset = datasets.ImageFolder(test_dir, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset,
batch_size=args.neural_net.test_batch_size,
shuffle=False,
num_workers=4,
pin_memory=True,
)
return train_loader, test_loader
def read_dataset(args):
if args.dataset.name == "CIFAR10":
train_loader, test_loader = cifar10(args)
elif args.dataset.name == "Tiny-ImageNet":
train_loader, test_loader = tiny_imagenet(args)
elif args.dataset.name == "Imagenette":
train_loader, test_loader = imagenette(args)
elif args.dataset.name == "Imagenet":
train_loader, test_loader = imagenet(args)
else:
raise NotImplementedError
return train_loader, test_loader
def read_test_dataset_from_file(args):
if args.dataset.name == "CIFAR10":
test_loader = cifar10_from_file(args)
elif args.dataset.name == "Tiny-ImageNet":
test_loader = tiny_imagenet_from_file(args)
elif args.dataset.name == "Imagenette":
test_loader = imagenette_from_file(args)
else:
raise NotImplementedError
return test_loader
| [
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.Tensor",
"torch.utils.data.TensorDataset"
] | 1.4.0 | canbakiskan/sparse_coding_frontend | 1f62b54824785aa441317ddab1baa3012f2fb401 |
1.4 | from os.path import join
import matplotlib.pyplot as plt
import torch
from ..models.resnet import ResNetWide
from ..utils.read_datasets import cifar10
from ..utils.plot_settings import *
from ..utils.get_modules import load_frontend
import numpy as np
from ..parameters import get_arguments
args = get_arguments()
device = "cuda"
classifier = ResNetWide(num_outputs=10).to(device)
classifier.load_state_dict(
torch.load(join(
'checkpoints', 'classifiers', 'CIFAR10', 'resnetwide_sgd_cyc_0.0500_NT_ep_100.pt'),
map_location=torch.device(device),
)
)
classifier.to(device)
train_loader, test_loader = cifar10(args)
frontend = load_frontend(args)
plt.figure(figsize=(10, 10))
weights = classifier.conv1.weight.clone().reshape(160, -1)
weights /= torch.norm(weights, dim=1, keepdim=True)
weights = weights.detach().cpu().numpy()
asd = weights @ weights.transpose()
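# asd is the Gram matrix of the L2-normalized first-layer filters, i.e. the pairwise
# cosine similarities between the 160 conv1 kernels.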
plt.imshow(
asd,
cmap=cm,
vmin=-np.abs(asd).max(),
vmax=np.abs(asd).max(),
interpolation="nearest",
)
plt.xticks([])
plt.yticks([])
plt.savefig(join('figs', 'inner_cnn.pdf'))
plt.close()
plt.figure(figsize=(10, 5))
plt.hist(asd.flatten(), 50)
plt.savefig(join('figs', 'hist_cnn.pdf'))
plt.close()
nb_cols = 2
nb_rows = 5
plt.figure(figsize=(10 * nb_cols, 4 * nb_rows))
for i in range(nb_cols * nb_rows):
plt.subplot(nb_rows, nb_cols, i + 1)
img_index = np.random.choice(50000)
print(f"image: {img_index},", end=" ")
img, _ = train_loader.dataset[img_index]
img = img.to(device)
classifier_out = classifier.norm(img.unsqueeze(0))
classifier_out = classifier.conv1(classifier_out)
# classifier_out = classifier.conv1(img.unsqueeze(0))
classifier_out /= torch.norm(classifier.conv1.weight.view(160, -1), dim=1).view(
1, 160, 1, 1
)
frontend_out = frontend.encoder.conv(img.unsqueeze(0))
# print(f"===={out[0,0,0,0]}")
# xlims = [-2.6, 2.6]
patch_index = (np.random.choice(range(1, 30, 2)),
np.random.choice(range(1, 30, 2)))
# patch_index = (22, 23)
print(f"patch: {patch_index}")
classifier_patch = classifier_out.squeeze().detach().cpu().numpy()[
:, patch_index]
frontend_patch = (
frontend_out.squeeze()
.detach()
.cpu()
.numpy()[:, patch_index[0] // 2, patch_index[1] // 2]
)
abs_max = max(np.abs(classifier_patch).max(), np.abs(frontend_patch).max())
xlims = (-abs_max, abs_max)
bin_edges = np.linspace(*xlims, 50)
hist, _ = np.histogram(classifier_patch, bin_edges, density=True)
# breakpoint()
color, edgecolor = ("orange", "darkorange")
plt.bar(
bin_edges[:-1] + np.diff(bin_edges) / 2,
hist,
width=(bin_edges[1] - bin_edges[0]),
alpha=0.5,
edgecolor="none",
color=color,
)
plt.step(
np.array([*bin_edges, bin_edges[-1] + (bin_edges[1] - bin_edges[0])]),
np.array([0, *hist, 0]),
label=r"CNN $1^{st}$ layer",
where="pre",
color=edgecolor,
)
ax = plt.gca()
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.get_yaxis().set_visible(False)
# print(f"===={out[0,0,0,0]}")
hist, _ = np.histogram(frontend_patch, bin_edges, density=True)
# bin_edges, hist = np.histogram(out.squeeze().detach().cpu().numpy()[
# :, np.random.choice(32), np.random.choice(32)], 50)
color, edgecolor = ("steelblue", "steelblue")
plt.bar(
bin_edges[:-1] + np.diff(bin_edges) / 2,
hist,
width=(bin_edges[1] - bin_edges[0]),
alpha=0.5,
edgecolor="none",
color=color,
)
plt.step(
np.array([*bin_edges, bin_edges[-1] + (bin_edges[1] - bin_edges[0])]),
np.array([0, *hist, 0]),
label=r"Overcomplete dictionary",
where="pre",
color=edgecolor,
)
ax = plt.gca()
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.get_yaxis().set_visible(False)
# ax = plt.gca()
# ax.xaxis.set_major_locator(ticker.MultipleLocator(18))
# ax.xaxis.set_minor_locator(ticker.MultipleLocator(4.5))
# ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=225, decimals=0))
# plt.gca().get_xaxis().set_major_formatter(
# FuncFormatter(lambda x, p: format((x / 225), ".2"))
# )
# fontsize = 21
# plt.xlabel("Correlation value", fontsize=fontsize)
# plt.ylabel("Histogram density", fontsize=fontsize)
# plt.xlim(xlims)
# plt.legend(loc="upper center", fontsize=fontsize)
plt.tight_layout()
plt.savefig(join('figs', 'more_correlations_normalized.pdf'))
plt.close()
| [
"torch.device",
"torch.norm"
] | 1.4.0 | canbakiskan/sparse_coding_frontend | 1f62b54824785aa441317ddab1baa3012f2fb401 |
1.6 | import logging
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data.dataset import Dataset
from ...tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_xlm_roberta import XLMRobertaTokenizer
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.getLogger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
self.task_name = self.task_name.lower()
class Split(Enum):
train = "train"
dev = "dev"
test = "test"
class GlueDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__(
self,
args: GlueDataTrainingArguments,
tokenizer: PreTrainedTokenizer,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
):
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name,
),
)
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {args.data_dir}")
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples,
tokenizer,
max_length=args.max_seq_length,
label_list=label_list,
output_mode=self.output_mode,
)
start = time.time()
torch.save(self.features, cached_features_file)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
| [
"torch.save",
"torch.load"
] | 1.6.0 | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from dmb.modeling.stereo.layers.basic_layers import conv_bn, conv_bn_relu, BasicBlock
from dmb.modeling.stereo.layers.basic_layers import conv_in_relu, BasicBlock_IN
class PSM_Encoder_Instance(nn.Module):
"""
Backbone proposed in PSMNet.
Args:
in_planes (int): the channels of input
batch_norm (bool): whether use batch normalization layer, default True
    Inputs:
        img (Tensor): input image, in [BatchSize, 3, Height, Width] layout
    Outputs:
        fms (Tensor): image feature maps, in [BatchSize, 32, Height//4, Width//4] layout
        w_arr (list[Tensor]): early-stage feature maps (outputs of firstconv and layer1)
"""
def __init__(self, in_planes=3, batch_norm=True):
super(PSM_Encoder_Instance, self).__init__()
self.in_planes = in_planes
self.batch_norm = batch_norm
self.firstconv = nn.Sequential(
conv_in_relu(batch_norm, self.in_planes, 32, 3, 2, 1, 1, bias=False),
conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False),
conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False),
)
# For building Basic Block
self.in_planes = 32
# BasicBlock_IN
self.layer1 = self._make_layer(batch_norm, BasicBlock_IN, 32, 3, 1, 1, 1)
self.layer2 = self._make_layer(batch_norm, BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 1, 1)
self.layer4 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 2, 2)
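        # Spatial pyramid pooling: four average-pooling branches at decreasing scales, each
        # followed by a 1x1 conv; their outputs are upsampled and concatenated in _forward.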
self.branch1 = nn.Sequential(
nn.AvgPool2d((64, 64), stride=(64, 64)),
conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),
)
self.branch2 = nn.Sequential(
nn.AvgPool2d((32, 32), stride=(32, 32)),
conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),
)
self.branch3 = nn.Sequential(
nn.AvgPool2d((16, 16), stride=(16, 16)),
conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),
)
self.branch4 = nn.Sequential(
nn.AvgPool2d((8, 8), stride=(8, 8)),
conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),
)
self.lastconv = nn.Sequential(
conv_bn_relu(batch_norm, 320, 128, 3, 1, 1, 1, bias=False),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, dilation=1, bias=False)
)
def _make_layer(self, batch_norm, block, out_planes, blocks, stride, padding, dilation):
downsample = None
if stride != 1 or self.in_planes != out_planes * block.expansion:
downsample = conv_bn(
batch_norm, self.in_planes, out_planes * block.expansion,
kernel_size=1, stride=stride, padding=0, dilation=1
)
layers = []
layers.append(
block(batch_norm, self.in_planes, out_planes, stride, downsample, padding, dilation)
)
self.in_planes = out_planes * block.expansion
for i in range(1, blocks):
layers.append(
block(batch_norm, self.in_planes, out_planes, 1, None, padding, dilation)
)
return nn.Sequential(*layers)
def _forward(self, x):
w_arr = []
for i in range(len(self.firstconv)):
x = self.firstconv[i](x)
w_arr.append(x)
for i in range(len(self.layer1)):
x = self.layer1[i](x)
w_arr.append(x)
output_2_1 = x
output_4_0 = self.layer2(output_2_1)
output_4_1 = self.layer3(output_4_0)
output_8 = self.layer4(output_4_1)
output_branch1 = self.branch1(output_8)
output_branch1 = F.interpolate(
output_branch1, (output_8.size()[2], output_8.size()[3]),
mode='bilinear', align_corners=True
)
output_branch2 = self.branch2(output_8)
output_branch2 = F.interpolate(
output_branch2, (output_8.size()[2], output_8.size()[3]),
mode='bilinear', align_corners=True
)
output_branch3 = self.branch3(output_8)
output_branch3 = F.interpolate(
output_branch3, (output_8.size()[2], output_8.size()[3]),
mode='bilinear', align_corners=True
)
output_branch4 = self.branch4(output_8)
output_branch4 = F.interpolate(
output_branch4, (output_8.size()[2], output_8.size()[3]),
mode='bilinear', align_corners=True
)
output_feature = torch.cat(
(output_4_0, output_8, output_branch4, output_branch3, output_branch2, output_branch1), 1)
output_feature = self.lastconv(output_feature)
return output_feature, w_arr
def forward(self, input):
fms, w_arr = self._forward(input)
return [fms, w_arr]
class FCPSMNetBackbone(nn.Module):
"""
    Siamese backbone based on PSMNet, with a momentum-updated key encoder (MoCo-style):
    encoder_k tracks encoder_q with momentum m and is never updated by gradients.
    Args:
        in_planes (int): the channels of input
        batch_norm (bool): whether use batch normalization layer, default True
        m (float): momentum for the key-encoder update, default 0.999
Inputs:
l_img (Tensor): left image, in [BatchSize, 3, Height, Width] layout
r_img (Tensor): right image, in [BatchSize, 3, Height, Width] layout
Outputs:
l_fms (Tensor): left image feature maps, in [BatchSize, 32, Height//4, Width//4] layout
r_fms (Tensor): right image feature maps, in [BatchSize, 32, Height//4, Width//4] layout
"""
def __init__(self, in_planes=3, batch_norm=True, m=0.999):
super(FCPSMNetBackbone, self).__init__()
self.in_planes = in_planes
self.m = m
print('m:{}'.format(m))
self.encoder_q = PSM_Encoder_Instance(in_planes, batch_norm)
self.encoder_k = PSM_Encoder_Instance(in_planes, batch_norm)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
def forward(self, *input):
if len(input) != 2:
raise ValueError('expected input length 2 (got {} length input)'.format(len(input)))
l_img, r_img = input
l_fms, l_w_arr = self.encoder_q(l_img)
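        # During training, the right view is encoded by the momentum-updated key encoder and
        # detached, so gradients only flow through the query encoder on the left view.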
if self.training:
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
r_fms, r_w_arr = self.encoder_k(r_img)
if isinstance(r_fms, list):
r_fms[0] = r_fms[0].detach()
else:
r_fms = r_fms.detach()
else:
r_fms, r_w_arr = self.encoder_q(r_img)
return [l_fms, l_w_arr], [r_fms, r_w_arr] | [
"torch.cat",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.no_grad",
"torch.nn.Conv2d"
] | 1.7.1 | jiaw-z/FCStereo | f76c3317e0951986b49a3bb794028a8ae067d410 |
1.9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: source/encoders/embedder.py
"""
import logging
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class Embedder(nn.Embedding):
"""
Embedder
"""
def load_embeddings(self, embeds, scale=0.05):
"""
load_embeddings
"""
assert len(embeds) == self.num_embeddings
embeds = torch.tensor(embeds)
num_known = 0
for i in range(len(embeds)):
# If no pretrained embedding for this token, randomly generate one.
if len(embeds[i].nonzero()) == 0:
nn.init.uniform_(embeds[i], -scale, scale)
else:
num_known += 1
self.weight.data.copy_(embeds)
logger.info("{} words have pretrained embeddings"
" (coverage: {:.3f})".format(
num_known, num_known / self.num_embeddings))
| [
"torch.nn.init.uniform_",
"torch.tensor"
] | 1.9 | LinjianLi/Seq2Seq-PyTorch | 671bd10ac1a2620fb4d5ceaacdff9c0e9f4738a2 |
0.3 | """
Loss Utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from . import geom_utils
import numpy as np
def mask_dt_loss(proj_verts, dist_transf):
"""
proj_verts: B x N x 2
(In normalized coordinate [-1, 1])
dist_transf: B x 1 x N x N
Computes the distance transform at the points where vertices land.
"""
# Reshape into B x 1 x N x 2
sample_grid = proj_verts.unsqueeze(1)
# B x 1 x 1 x N
dist_transf = torch.nn.functional.grid_sample(dist_transf, sample_grid, padding_mode='border')
return dist_transf.mean()
def texture_dt_loss(texture_flow, dist_transf, vis_rend=None, cams=None, verts=None, tex_pred=None):
"""
texture_flow: B x F x T x T x 2
(In normalized coordinate [-1, 1])
dist_transf: B x 1 x N x N
Similar to geom_utils.sample_textures
But instead of sampling image, it samples dt values.
"""
# Reshape into B x F x T*T x 2
T = texture_flow.size(-2)
F = texture_flow.size(1)
flow_grid = texture_flow.view(-1, F, T * T, 2)
# B x 1 x F x T*T
dist_transf = torch.nn.functional.grid_sample(dist_transf, flow_grid)
if vis_rend is not None:
# Visualize the error!
# B x 3 x F x T*T
dts = dist_transf.repeat(1, 3, 1, 1)
# B x 3 x F x T x T
dts = dts.view(-1, 3, F, T, T)
# B x F x T x T x 3
dts = dts.permute(0, 2, 3, 4, 1)
dts = dts.unsqueeze(4).repeat(1, 1, 1, 1, T, 1) / dts.max()
from ..utils import bird_vis
for i in range(dist_transf.size(0)):
rend_dt = vis_rend(verts[i], cams[i], dts[i])
rend_img = bird_vis.tensor2im(tex_pred[i].data)
import matplotlib.pyplot as plt
plt.ion()
fig=plt.figure(1)
plt.clf()
ax = fig.add_subplot(121)
ax.imshow(rend_dt)
ax = fig.add_subplot(122)
ax.imshow(rend_img)
import ipdb; ipdb.set_trace()
return dist_transf.mean()
def texture_loss(img_pred, img_gt, mask_pred, mask_gt):
"""
Input:
img_pred, img_gt: B x 3 x H x W
mask_pred, mask_gt: B x H x W
"""
mask_pred = mask_pred.unsqueeze(1)
mask_gt = mask_gt.unsqueeze(1)
# masked_rend = (img_pred * mask)[0].data.cpu().numpy()
# masked_gt = (img_gt * mask)[0].data.cpu().numpy()
# import matplotlib.pyplot as plt
# plt.ion()
# plt.figure(1)
# plt.clf()
# fig = plt.figure(1)
# ax = fig.add_subplot(121)
# ax.imshow(np.transpose(masked_rend, (1, 2, 0)))
# ax = fig.add_subplot(122)
# ax.imshow(np.transpose(masked_gt, (1, 2, 0)))
# import ipdb; ipdb.set_trace()
return torch.nn.L1Loss()(img_pred * mask_pred, img_gt * mask_gt)
def camera_loss(cam_pred, cam_gt, margin):
"""
cam_* are B x 7, [sc, tx, ty, quat]
Losses are in similar magnitude so one margin is ok.
"""
# CH: camera loss always 0
return 0
# CH: comment out old loss code
# rot_pred = cam_pred[:, -4:]
# rot_gt = cam_gt[:, -4:]
# rot_loss = hinge_loss(quat_loss_geodesic(rot_pred, rot_gt), margin)
# # Scale and trans.
# st_loss = (cam_pred[:, :3] - cam_gt[:, :3])**2
# st_loss = hinge_loss(st_loss.view(-1), margin)
# return rot_loss.mean() + st_loss.mean()
def hinge_loss(loss, margin):
# Only penalize if loss > margin
zeros = torch.autograd.Variable(torch.zeros(1).cuda(), requires_grad=False)
return torch.max(loss - margin, zeros)
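# Worked example (values made up): with loss = tensor([0.1, 0.5]) and
# margin = 0.2, the result is max(loss - 0.2, 0) = tensor([0.0, 0.3]);
# only the part of the loss exceeding the margin is penalized.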
def quat_loss_geodesic(q1, q2):
'''
Geodesic rotation loss.
Args:
q1: N X 4
q2: N X 4
Returns:
loss : N x 1
'''
q1 = torch.unsqueeze(q1, 1)
q2 = torch.unsqueeze(q2, 1)
q2_conj = torch.cat([ q2[:, :, [0]] , -1*q2[:, :, 1:4] ], dim=-1)
q_rel = geom_utils.hamilton_product(q1, q2_conj)
q_loss = 1 - torch.abs(q_rel[:, :, 0])
# we can also return q_loss*q_loss
return q_loss
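# Sanity check (assuming geom_utils.hamilton_product is the usual quaternion
# product): for identical unit quaternions q1 == q2 the relative rotation
# q_rel is the identity quaternion (1, 0, 0, 0), so the loss is 1 - |1| = 0;
# it approaches 1 as the relative rotation approaches 180 degrees.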
def quat_loss(q1, q2):
'''
Anti-podal squared L2 loss.
Args:
q1: N X 4
q2: N X 4
Returns:
loss : N x 1
'''
q_diff_loss = (q1-q2).pow(2).sum(1)
q_sum_loss = (q1+q2).pow(2).sum(1)
q_loss, _ = torch.stack((q_diff_loss, q_sum_loss), dim=1).min(1)
return q_loss
def triangle_loss(verts, edge2verts):
"""
Encourages dihedral angle to be 180 degrees.
Args:
verts: B X N X 3
edge2verts: B X E X 4
Returns:
loss : scalar
"""
indices_repeat = torch.stack([edge2verts, edge2verts, edge2verts], dim=2) # B X E X 3 X 4
verts_A = torch.gather(verts, 1, indices_repeat[:, :, :, 0])
verts_B = torch.gather(verts, 1, indices_repeat[:, :, :, 1])
verts_C = torch.gather(verts, 1, indices_repeat[:, :, :, 2])
verts_D = torch.gather(verts, 1, indices_repeat[:, :, :, 3])
# n1 = cross(ad, ab)
# n2 = cross(ab, ac)
n1 = geom_utils.cross_product(verts_D - verts_A, verts_B - verts_A)
n2 = geom_utils.cross_product(verts_B - verts_A, verts_C - verts_A)
n1 = torch.nn.functional.normalize(n1, dim=2)
n2 = torch.nn.functional.normalize(n2, dim=2)
dot_p = (n1 * n2).sum(2)
loss = ((1 - dot_p)**2).mean()
return loss
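# Intuition (a sketch, assuming columns 0-1 of edge2verts index the shared
# edge and columns 2-3 the opposite vertices of the two adjacent faces):
# n1 and n2 are the two face normals around each edge. When the faces are
# coplanar (dihedral angle of 180 degrees) the normals agree, dot_p == 1 and
# the per-edge penalty (1 - dot_p)**2 vanishes.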
def deform_l2reg(V):
"""
l2 norm on V = B x N x 3
"""
V = V.view(-1, V.size(2))
return torch.mean(torch.norm(V, p=2, dim=1))
def entropy_loss(A):
"""
Input is K x N
Each row is a probability distribution over vertices for the k-th keypoint.
We want this to be sparse = low entropy.
"""
entropy = -torch.sum(A * torch.log(A), 1)
# Return the average entropy over keypoints
return torch.mean(entropy)
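# Worked example: a uniform row A[k] = 1/N has entropy log(N) (the maximum),
# while a sharply peaked row has entropy close to 0, which is what this loss
# encourages. Exact zeros in A would give 0 * log(0) = NaN, so A should hold
# strictly positive probabilities (e.g. the output of a softmax).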
def kp_l2_loss(kp_pred, kp_gt):
"""
L2 loss between visible keypoints.
\Sum_i [0.5 * vis[i] * (kp_gt[i] - kp_pred[i])^2] / (|vis|)
"""
# CH: Do not consider keypoint loss
return 0
# criterion = torch.nn.MSELoss()
# vis = (kp_gt[:, :, 2, None] > 0).float()
# # This always has to be (output, target), not (target, output)
# return criterion(vis * kp_pred, vis * kp_gt[:, :, :2])
def lsgan_loss(score_real, score_fake):
"""
DELETE ME.
Label 0=fake, 1=real.
score_real is B x 1, score for real samples
score_fake is B x 1, score for fake samples
Returns loss for discriminator and encoder.
"""
disc_loss_real = torch.mean((score_real - 1)**2)
disc_loss_fake = torch.mean((score_fake)**2)
disc_loss = disc_loss_real + disc_loss_fake
enc_loss = torch.mean((score_fake - 1)**2)
return disc_loss, enc_loss
class EdgeLoss(object):
"""
Edge length should not diverge from the original edge length.
On initialization computes the current edge lengths.
"""
def __init__(self, verts, edges2verts, margin=2, use_bad_edge=False, use_l2=False):
# Input:
# verts: B x N x 3
# edges2verts: B x E x 4
# (only using the first 2 columns)
self.use_l2 = use_l2
# B x E x 2
edge_verts = edges2verts[:, :, :2]
self.indices = torch.stack([edge_verts, edge_verts, edge_verts], dim=2)
V_copy = torch.autograd.Variable(verts.data, requires_grad=False)
if V_copy.dim() == 2:
# N x 3 (mean shape) -> B x N x 3
V_copy = V_copy.unsqueeze(0).repeat(edges2verts.size(0), 1, 1)
if use_bad_edge:
self.log_e0 = torch.log(self.compute_edgelength(V_copy))
else:
# e0 is the mean over all edge lengths!
e0 = self.compute_edgelength(V_copy).mean(1).view(-1, 1)
self.log_e0 = torch.log(e0)
self.margin = np.log(margin)
self.zeros = torch.autograd.Variable(torch.zeros(1).cuda(), requires_grad=False)
# For visualization
self.v1 = edges2verts[0, :, 0].data.cpu().numpy()
self.v2 = edges2verts[0, :, 1].data.cpu().numpy()
def __call__(self, verts):
e1 = self.compute_edgelength(verts)
if self.use_l2:
dist = (torch.log(e1) - self.log_e0)**2
self.dist = torch.max(dist - self.margin**2, self.zeros)
else:
dist = torch.abs(torch.log(e1) - self.log_e0)
self.dist = torch.max(dist - self.margin, self.zeros)
return self.dist.mean()
def compute_edgelength(self, V):
v1 = torch.gather(V, 1, self.indices[:, :, :, 0])
v2 = torch.gather(V, 1, self.indices[:, :, :, 1])
elengths = torch.sqrt(((v1 - v2)**2).sum(2))
# B x E
return elengths
def visualize(self, verts, F_np, mv=None):
from psbody.mesh import Mesh
V = verts[0].data.cpu().numpy()
mesh = Mesh(V, F_np)
dist = self.dist[0].data.cpu().numpy()
v_weights = np.zeros((V.shape[0]))
for e_id, (v1_id, v2_id) in enumerate(zip(self.v1, self.v2)):
v_weights[v1_id] += dist[e_id]
v_weights[v2_id] += dist[e_id]
mesh.set_vertex_colors_from_weights(v_weights)
if mv is not None:
mv.set_dynamic_meshes([mesh])
else:
mesh.show()
import ipdb; ipdb.set_trace()
class LaplacianLoss(object):
"""
Encourages minimal mean curvature shapes.
"""
def __init__(self, faces):
# Input:
# faces: B x F x 3
from ..nnutils.laplacian import Laplacian
# V x V
self.laplacian = Laplacian(faces)
self.Lx = None
def __call__(self, verts):
self.Lx = self.laplacian(verts)
# Reshape to BV x 3
Lx = self.Lx.view(-1, self.Lx.size(2))
loss = torch.norm(Lx, p=2, dim=1).mean()
return loss
def visualize(self, verts, mv=None):
# Visualizes the laplacian.
# Verts is B x N x 3 Variable
Lx = self.Lx[0].data.cpu().numpy()
V = verts[0].data.cpu().numpy()
from psbody.mesh import Mesh
F = self.laplacian.F_np[0]
mesh = Mesh(V, F)
weights = np.linalg.norm(Lx, axis=1)
mesh.set_vertex_colors_from_weights(weights)
if mv is not None:
mv.set_dynamic_meshes([mesh])
else:
mesh.show()
import ipdb; ipdb.set_trace()
class PerceptualTextureLoss(object):
def __init__(self):
from ..nnutils.perceptual_loss import PerceptualLoss
self.perceptual_loss = PerceptualLoss()
def __call__(self, img_pred, img_gt, mask_pred, mask_gt):
"""
Input:
img_pred, img_gt: B x 3 x H x W
mask_pred, mask_gt: B x H x W
"""
mask_pred = mask_pred.unsqueeze(1)
mask_gt = mask_gt.unsqueeze(1)
# masked_rend = (img_pred * mask_pred)[0].data.cpu().numpy()
# masked_gt = (img_gt * mask_gt)[0].data.cpu().numpy()
# import matplotlib.pyplot as plt
# plt.ion()
# plt.figure(1)
# plt.clf()
# fig = plt.figure(1)
# ax = fig.add_subplot(121)
# ax.imshow(np.transpose(masked_rend, (1, 2, 0)))
# ax = fig.add_subplot(122)
# ax.imshow(np.transpose(masked_gt, (1, 2, 0)))
# import ipdb; ipdb.set_trace()
# Only use mask_gt..
dist = self.perceptual_loss(img_pred * mask_gt, img_gt * mask_gt)
return dist.mean()
| [
"torch.cat",
"torch.stack",
"torch.gather",
"torch.autograd.Variable",
"torch.norm",
"torch.unsqueeze",
"torch.abs",
"torch.zeros",
"torch.max",
"torch.log",
"torch.nn.functional.normalize",
"torch.nn.L1Loss",
"torch.nn.functional.grid_sample",
"torch.mean"
] | 0.3.1 | MayankR/cmr | 6c898a5294954899334d430ec71e0a0692a0d99e |
0.3 | """
Mesh net model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
from ..utils import mesh
from ..utils import geometry as geom_utils
from . import net_blocks as nb
#-------------- flags -------------#
#----------------------------------#
flags.DEFINE_boolean('symmetric', True, 'Use symmetric mesh or not')
flags.DEFINE_integer('nz_feat', 200, 'Encoded feature size')
flags.DEFINE_boolean('texture', True, 'if true uses texture!')
flags.DEFINE_boolean('symmetric_texture', True, 'if true texture is symmetric!')
flags.DEFINE_integer('tex_size', 6, 'Texture resolution per face')
flags.DEFINE_integer('subdivide', 3, '# to subdivide icosahedron, 3=642verts, 4=2562 verts')
flags.DEFINE_boolean('use_deconv', False, 'If true uses Deconv')
flags.DEFINE_string('upconv_mode', 'bilinear', 'upsample mode')
flags.DEFINE_boolean('only_mean_sym', False, 'If true, only the meanshape is symmetric')
flags.DEFINE_boolean('deeper_shape_predictor', False, 'Use 2 layer shape deformation predictor')
#------------- Modules ------------#
#----------------------------------#
class ResNetConv(nn.Module):
def __init__(self, n_blocks=4):
super(ResNetConv, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.n_blocks = n_blocks
def forward(self, x):
n_blocks = self.n_blocks
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
if n_blocks >= 1:
x = self.resnet.layer1(x)
if n_blocks >= 2:
x = self.resnet.layer2(x)
if n_blocks >= 3:
x = self.resnet.layer3(x)
if n_blocks >= 4:
x = self.resnet.layer4(x)
return x
class Encoder(nn.Module):
"""
Current:
Resnet with 4 blocks (x32 spatial dim reduction)
Another conv with stride 2 (x64)
This is sent to 2 fc layers with final output nz_feat.
"""
def __init__(self, input_shape, n_blocks=4, nz_feat=100, batch_norm=True):
super(Encoder, self).__init__()
self.resnet_conv = ResNetConv(n_blocks=4)
self.enc_conv1 = nb.conv2d(batch_norm, 512, 256, stride=2, kernel_size=4)
nc_input = 256 * (input_shape[0] // 64) * (input_shape[1] // 64)
self.enc_fc = nb.fc_stack(nc_input, nz_feat, 2)
nb.net_init(self.enc_conv1)
def forward(self, img):
resnet_feat = self.resnet_conv.forward(img)
out_enc_conv1 = self.enc_conv1(resnet_feat)
out_enc_conv1 = out_enc_conv1.view(img.size(0), -1)
feat = self.enc_fc.forward(out_enc_conv1)
return feat
class TexturePredictorUV(nn.Module):
"""
Outputs mesh texture
"""
def __init__(self, nz_feat, uv_sampler, opts, img_H=64, img_W=128, n_upconv=5, nc_init=256, predict_flow=False, symmetric=False, num_sym_faces=624):
super(TexturePredictorUV, self).__init__()
self.feat_H = img_H // (2 ** n_upconv)
self.feat_W = img_W // (2 ** n_upconv)
self.nc_init = nc_init
self.symmetric = symmetric
self.num_sym_faces = num_sym_faces
self.F = uv_sampler.size(1)
self.T = uv_sampler.size(2)
self.predict_flow = predict_flow
# B x F x T x T x 2 --> B x F x T*T x 2
self.uv_sampler = uv_sampler.view(-1, self.F, self.T*self.T, 2)
self.enc = nb.fc_stack(nz_feat, self.nc_init*self.feat_H*self.feat_W, 2)
if predict_flow:
nc_final=2
else:
nc_final=3
self.decoder = nb.decoder2d(n_upconv, None, nc_init, init_fc=False, nc_final=nc_final, use_deconv=opts.use_deconv, upconv_mode=opts.upconv_mode)
def forward(self, feat):
# pdb.set_trace()
uvimage_pred = self.enc.forward(feat)
uvimage_pred = uvimage_pred.view(uvimage_pred.size(0), self.nc_init, self.feat_H, self.feat_W)
# B x 2 or 3 x H x W
self.uvimage_pred = self.decoder.forward(uvimage_pred)
self.uvimage_pred = torch.nn.functional.tanh(self.uvimage_pred)
tex_pred = torch.nn.functional.grid_sample(self.uvimage_pred, self.uv_sampler)
tex_pred = tex_pred.view(uvimage_pred.size(0), -1, self.F, self.T, self.T).permute(0, 2, 3, 4, 1)
if self.symmetric:
# Symmetrize.
tex_left = tex_pred[:, -self.num_sym_faces:]
return torch.cat([tex_pred, tex_left], 1)
else:
# Contiguous Needed after the permute..
return tex_pred.contiguous()
class ShapePredictor(nn.Module):
"""
Outputs mesh deformations
"""
def __init__(self, nz_feat, num_verts):
super(ShapePredictor, self).__init__()
# self.pred_layer = nb.fc(True, nz_feat, num_verts)
self.pred_layer = nn.Linear(nz_feat, num_verts * 3)
# Initialize pred_layer weights to be small so the initial deformations aren't too big
self.pred_layer.weight.data.normal_(0, 0.0001)
def forward(self, feat):
# pdb.set_trace()
delta_v = self.pred_layer.forward(feat)
# Make it B x num_verts x 3
delta_v = delta_v.view(delta_v.size(0), -1, 3)
# print('shape: ( Mean = {}, Var = {} )'.format(delta_v.mean().data[0], delta_v.var().data[0]))
return delta_v
class DeeperShapePredictor(nn.Module):
"""
Outputs mesh deformations
"""
def __init__(self, nz_feat, num_verts):
super(DeeperShapePredictor, self).__init__()
# self.pred_layer = nb.fc(True, nz_feat, num_verts)
self.hidden_layer = nn.Linear(nz_feat, num_verts)
self.relu_act = nn.LeakyReLU(0.2,inplace=True)
self.pred_layer = nn.Linear(num_verts, num_verts * 3)
# Initialize pred_layer weights to be small so the initial deformations aren't too big
self.pred_layer.weight.data.normal_(0, 0.0001)
def forward(self, feat):
# pdb.set_trace()
feat = self.hidden_layer.forward(feat)
feat = self.relu_act(feat)
delta_v = self.pred_layer.forward(feat)
# Make it B x num_verts x 3
delta_v = delta_v.view(delta_v.size(0), -1, 3)
# print('shape: ( Mean = {}, Var = {} )'.format(delta_v.mean().data[0], delta_v.var().data[0]))
return delta_v
class QuatPredictor(nn.Module):
def __init__(self, nz_feat, nz_rot=4, classify_rot=False):
super(QuatPredictor, self).__init__()
self.pred_layer = nn.Linear(nz_feat, nz_rot)
self.classify_rot = classify_rot
def forward(self, feat):
quat = self.pred_layer.forward(feat)
if self.classify_rot:
quat = torch.nn.functional.log_softmax(quat)
else:
quat = torch.nn.functional.normalize(quat)
return quat
class ScalePredictor(nn.Module):
def __init__(self, nz):
super(ScalePredictor, self).__init__()
self.pred_layer = nn.Linear(nz, 1)
def forward(self, feat):
scale = self.pred_layer.forward(feat) + 1 #biasing the scale to 1
scale = torch.nn.functional.relu(scale) + 1e-12
# print('scale: ( Mean = {}, Var = {} )'.format(scale.mean().data[0], scale.var().data[0]))
return scale
class TransPredictor(nn.Module):
"""
Outputs [tx, ty] or [tx, ty, tz]
"""
def __init__(self, nz, orth=True):
super(TransPredictor, self).__init__()
if orth:
self.pred_layer = nn.Linear(nz, 2)
else:
self.pred_layer = nn.Linear(nz, 3)
def forward(self, feat):
trans = self.pred_layer.forward(feat)
# print('trans: ( Mean = {}, Var = {} )'.format(trans.mean().data[0], trans.var().data[0]))
return trans
class CodePredictor(nn.Module):
def __init__(self, nz_feat=100, num_verts=1000, deeper_shape_predictor=False):
super(CodePredictor, self).__init__()
# self.quat_predictor = QuatPredictor(nz_feat)
if deeper_shape_predictor:
print("Using deeper shape predictor")
self.shape_predictor = DeeperShapePredictor(nz_feat, num_verts=num_verts)
else:
print("Using shallow shape predictor")
self.shape_predictor = ShapePredictor(nz_feat, num_verts=num_verts)
# self.scale_predictor = ScalePredictor(nz_feat)
# self.trans_predictor = TransPredictor(nz_feat)
def forward(self, feat):
shape_pred = self.shape_predictor.forward(feat)
scale_pred = 0 #self.scale_predictor.forward(feat)
quat_pred = 0 #self.quat_predictor.forward(feat)
trans_pred = 0 #self.trans_predictor.forward(feat)
return shape_pred, scale_pred, trans_pred, quat_pred
#------------ Mesh Net ------------#
#----------------------------------#
class MeshNet(nn.Module):
def __init__(self, input_shape, opts, nz_feat=100, num_kps=0, sfm_mean_shape=None):
# Input shape is H x W of the image.
super(MeshNet, self).__init__()
self.opts = opts
self.pred_texture = opts.texture
self.symmetric = opts.symmetric
self.symmetric_texture = opts.symmetric_texture
# Mean shape.
verts, faces = mesh.create_sphere(opts.subdivide)
num_verts = verts.shape[0]
if self.symmetric:
verts, faces, num_indept, num_sym, num_indept_faces, num_sym_faces = mesh.make_symmetric(verts, faces)
if sfm_mean_shape is not None:
verts = geom_utils.project_verts_on_mesh(verts, sfm_mean_shape[0], sfm_mean_shape[1])
num_sym_output = num_indept + num_sym
if opts.only_mean_sym:
print('Only the mean shape is symmetric!')
self.num_output = num_verts
else:
self.num_output = num_sym_output
self.num_sym = num_sym
self.num_indept = num_indept
self.num_indept_faces = num_indept_faces
self.num_sym_faces = num_sym_faces
# mean shape is only half.
self.mean_v = nn.Parameter(torch.Tensor(verts[:num_sym_output]))
# Needed for symmetrizing..
self.flip = Variable(torch.ones(1, 3).cuda(), requires_grad=False)
self.flip[0, 0] = -1
else:
if sfm_mean_shape is not None:
verts = geom_utils.project_verts_on_mesh(verts, sfm_mean_shape[0], sfm_mean_shape[1])
self.mean_v = nn.Parameter(torch.Tensor(verts))
self.num_output = num_verts
verts_np = verts
faces_np = faces
self.og_faces = faces
self.faces = Variable(torch.LongTensor(faces).cuda(), requires_grad=False)
self.edges2verts = mesh.compute_edges2verts(verts, faces)
# vert2kp_init = torch.Tensor(np.ones((num_kps, num_verts)) / float(num_verts))
# Remember initial vert2kp (after softmax)
# self.vert2kp_init = torch.nn.functional.softmax(Variable(vert2kp_init.cuda(), requires_grad=False), dim=1)
# self.vert2kp = nn.Parameter(vert2kp_init)
self.encoder = Encoder(input_shape, n_blocks=4, nz_feat=nz_feat)
self.code_predictor = CodePredictor(nz_feat=nz_feat, num_verts=self.num_output, deeper_shape_predictor=opts.deeper_shape_predictor)
if self.pred_texture:
if self.symmetric_texture:
num_faces = self.num_indept_faces + self.num_sym_faces
else:
num_faces = faces.shape[0]
uv_sampler = mesh.compute_uvsampler(verts_np, faces_np[:num_faces], tex_size=opts.tex_size)
# F' x T x T x 2
uv_sampler = Variable(torch.FloatTensor(uv_sampler).cuda(), requires_grad=False)
# B x F' x T x T x 2
uv_sampler = uv_sampler.unsqueeze(0).repeat(self.opts.batch_size, 1, 1, 1, 1)
img_H = int(2**np.floor(np.log2(np.sqrt(num_faces) * opts.tex_size)))
img_W = 2 * img_H
self.texture_predictor = TexturePredictorUV(
nz_feat, uv_sampler, opts, img_H=img_H, img_W=img_W, predict_flow=True, symmetric=opts.symmetric_texture, num_sym_faces=self.num_sym_faces)
nb.net_init(self.texture_predictor)
def forward(self, img):
img_feat = self.encoder.forward(img)
codes_pred = self.code_predictor.forward(img_feat)
if self.pred_texture:
texture_pred = self.texture_predictor.forward(img_feat)
return codes_pred, texture_pred
else:
return codes_pred
def symmetrize(self, V):
"""
Takes num_indept+num_sym verts and makes it
num_indept + num_sym + num_sym
Is identity if model is not symmetric
"""
if self.symmetric:
if V.dim() == 2:
# No batch
V_left = self.flip * V[-self.num_sym:]
return torch.cat([V, V_left], 0)
else:
# With batch
V_left = self.flip * V[:, -self.num_sym:]
return torch.cat([V, V_left], 1)
else:
return V
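# Note on the reflection above (a sketch): self.flip is the (1, 3) tensor
# [-1, 1, 1], so flip * V[..., -num_sym:, :] mirrors the last num_sym vertices
# across the x = 0 plane; concatenating them turns the stored half-mesh of
# num_indept + num_sym vertices into the full symmetric mesh.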
def get_mean_shape(self):
return self.symmetrize(self.mean_v)
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.cat",
"torch.nn.LeakyReLU",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.ones",
"torch.nn.functional.grid_sample",
"torch.LongTensor",
"torch.nn.functional.relu",
"torch.Tensor",
"torch.nn.functional.tanh"
] | 0.3.1 | MayankR/cmr | 6c898a5294954899334d430ec71e0a0692a0d99e |
1.5 | import torch
from torch import nn, einsum
import torch.nn.functional as F
import math
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from siren.init import siren_uniform_
def sine_init(x):
siren_uniform_(x, mode='fan_in', c=6)
class Sine(nn.Module):
def __init__(self, w0 = 1.):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
class Siren(nn.Module):
def __init__(self, dim_in, dim_out, w0 = 1., c = 6., is_first = False, use_bias = True, activation = None):
super().__init__()
self.dim_in = dim_in
self.is_first = is_first
weight = torch.zeros(dim_out, dim_in)
bias = torch.zeros(dim_out) if use_bias else None
self.init_(weight, bias, c = c, w0 = w0)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias) if use_bias else None
self.activation = Sine(w0) if activation is None else activation
def init_(self, weight, bias, c, w0):
dim = self.dim_in
w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
weight.uniform_(-w_std, w_std)
if bias is not None:
bias.uniform_(-w_std, w_std)
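# Numeric example of the initialisation bound above (values for illustration):
# with dim_in = 256, c = 6 and w0 = 1, a hidden layer uses
# w_std = sqrt(6 / 256) / 1 ~ 0.153, while a first layer (is_first=True)
# uses 1 / 256 ~ 0.0039 instead, as in the SIREN initialisation scheme.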
def forward(self, x):
out = F.linear(x, self.weight, self.bias)
out = self.activation(out)
return out
class SinLayerClass(nn.Module):
def __init__(self, dim, hidden_dim, num_heads, dropout = 0.2):
super().__init__()
internal_state_dim = int(hidden_dim//2)
self.net = nn.Sequential(
Siren(dim, hidden_dim),
nn.Dropout(dropout),
nn.Linear(hidden_dim, internal_state_dim),
nn.GELU(),
nn.Linear(internal_state_dim, num_heads)
)
def forward(self, x):
return self.net(x)
class SinLayer(nn.Module):
def __init__(self, dim, hidden_dim, num_heads, dropout = 0.2):
super().__init__()
internal_state_dim = int(hidden_dim//2)
internal_state_dim2 = int(internal_state_dim//2)
self.net = nn.Sequential(
Siren(dim, hidden_dim),
nn.Dropout(dropout),
Siren(hidden_dim, internal_state_dim),
nn.Dropout(dropout),
nn.Linear(internal_state_dim, internal_state_dim2),
nn.GELU(),
nn.Linear(internal_state_dim2, num_heads)
)
def forward(self, x):
return self.net(x)
class MLP(nn.Module):
def __init__(self, dim, hidden_dim, num_heads, dropout = 0.):
super().__init__()
internal_state_dim = int(hidden_dim//2)
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, internal_state_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(internal_state_dim, num_heads),
)
def forward(self, x):
return self.net(x) | [
"torch.zeros",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.sin",
"torch.nn.Parameter",
"torch.nn.functional.linear",
"torch.nn.GELU"
] | 1.5.0 | cankocagil/TT-SRN | 83eb03a9393442e6b09aa736862b3a2d5bdcf5b6 |
1.1 |
from typing import List, Tuple, Any
import numpy as np
from collections import defaultdict
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
from torch.nn import Parameter
__all__ = ["GatedNonLinearity1", "GATED_ID", "GATES_ID"]
GATED_ID = "gated"
GATES_ID = "gate"
class GatedNonLinearity1(EquivariantModule):
def __init__(self,
in_type: FieldType,
gates: List = None,
drop_gates: bool = True,
**kwargs
):
r"""
Gated non-linearities.
This module applies a bias and a sigmoid function to the gate fields and then multiplies each gated
field by one of the gates.
The input representation of the gated fields is preserved by this operation while the gate fields are
discarded.
The gates and the gated fields are provided in a single input tensor and, therefore, :attr:`in_type` should
be the representation of the fiber containing both gates and gated fields.
Moreover, the parameter :attr:`gates` needs to be set to a list as long as the total number of fields,
containing in position ``i`` the string ``"gate"`` if the ``i``-th field is a gate or the string ``"gated"``
if the ``i``-th field is a gated field. No other strings are allowed.
By default (``gates = None``), the first half of the fields is assumed to contain the gates (and, so, these
fields have to be trivial fields) while the second one is assumed to contain the gated fields.
In any case, the number of gates and the number of gated fields have to match (therefore, the number of
fields has to be an even number).
Args:
in_type (FieldType): the input field type
gates (list, optional): list of strings specifying which field in input is a gate and which is a gated field
drop_gates (bool, optional): if ``True`` (default), drop the trivial fields after using them to compute
the gates. If ``False``, the gates are stacked with the gated fields in the output
"""
assert isinstance(in_type.gspace, GeneralOnR2)
if gates is None:
assert len(in_type) % 2 == 0
g = len(in_type) // 2
gates = [GATES_ID]*g + [GATED_ID]*g
assert len(gates) == len(in_type)
super(GatedNonLinearity1, self).__init__()
self.space = in_type.gspace
self.in_type = in_type
self.drop_gates = drop_gates
self._contiguous = {}
_input_indices = defaultdict(lambda: [])
_output_indices = defaultdict(lambda: [])
self._nfields = defaultdict(int)
self.branching = None
for g, r in zip(gates, in_type.representations):
if g == GATES_ID:
# assert GATES_ID in r.supported_nonlinearities, \
assert r.is_trivial(), \
"Error! Representation \"{}\" can't be a \"gate\"".format(r.name)
elif g == GATED_ID:
assert GATED_ID in r.supported_nonlinearities, \
'Error! Representation "{}" does not support "gated" non-linearity'.format(r.name)
else:
raise ValueError('Error! "{}" type not recognized'.format(g))
ngates = len([g for g in gates if g == GATES_ID])
ngated = len([g for g in gates if g == GATED_ID])
assert ngates == ngated, \
'Error! Number of gates ({}) does not match the number of gated non-linearities required ({})' \
.format(ngates, ngated)
if self.drop_gates:
# only gated fields are preserved
# therefore, the output representation is computed from the input one, removing the gates
self.out_type = in_type.index_select([i for i, g in enumerate(gates) if g == GATED_ID])
else:
self.out_type = in_type
in_last_position = 0
out_last_position = 0
last_type = None
# group fields by their type (gated or gate) and their size, check if fields of the same type are
# contiguous and retrieve the indices of the fields
for g, r in zip(gates, in_type.representations):
if g == GATES_ID:
type = g
else:
type = r.size
self._nfields[r.size] += 1
if type != last_type:
if not type in self._contiguous:
self._contiguous[type] = True
else:
self._contiguous[type] = False
last_type = type
_input_indices[type] += list(range(in_last_position, in_last_position + r.size))
in_last_position += r.size
if g != GATES_ID or not self.drop_gates:
# since gates are discarded in output, the position on the output fiber is shifted
# only when a gated field is met
_output_indices[type] += list(range(out_last_position, out_last_position + r.size))
out_last_position += r.size
_input_indices = dict(_input_indices)
# if self.drop_gates:
_output_indices = dict(_output_indices)
# else:
# self._output_indices = self._input_indices
for t, contiguous in self._contiguous.items():
if contiguous:
# for contiguous fields, only the first and last indices are kept
_input_indices[t] = torch.LongTensor([min(_input_indices[t]), max(_input_indices[t]) + 1])
if t != GATES_ID or not self.drop_gates:
_output_indices[t] = torch.LongTensor([min(_output_indices[t]), max(_output_indices[t]) + 1])
else:
# otherwise, transform the list of indices into a tensor
_input_indices[t] = torch.LongTensor(_input_indices[t])
if t != GATES_ID or not self.drop_gates:
_output_indices[t] = torch.LongTensor(_output_indices[t])
# register the indices tensors as buffers of this module
self.register_buffer('input_indices_{}'.format(t), _input_indices[t])
if t != GATES_ID or not self.drop_gates:
self.register_buffer('output_indices_{}'.format(t), _output_indices[t])
# gates need to be distinguished from gated fields
_gates_indices = _input_indices.pop(GATES_ID)
self.register_buffer('gates_indices', _gates_indices)
# build a sorted list of the field groups, so that they are always iterated through in the same order
self._order = sorted(_input_indices.keys())
# the bias for the gates
self.bias = Parameter(torch.randn(1, ngates, 1, 1, dtype=torch.float), requires_grad=True)
def forward(self, input: GeometricTensor) -> GeometricTensor:
r"""
Apply the gated non-linearity to the input feature map.
Args:
input (GeometricTensor): the input feature map
Returns:
the resulting feature map
"""
assert isinstance(input, GeometricTensor)
assert input.type == self.in_type
# retrieve the gates
if self._contiguous[GATES_ID]:
gates = input.tensor[:, self.gates_indices[0]:self.gates_indices[1], ...]
else:
gates = input.tensor[:, self.gates_indices, ...]
# slicing out only the gated fields from the joint tensor here would not save any work
input = input.tensor
# transform the gates
gates = torch.sigmoid(gates - self.bias)
b, c, h, w = input.shape
# build the output tensor
output = torch.empty(b, self.out_type.size, h, w, dtype=torch.float, device=self.bias.device)
if not self.drop_gates:
# copy the gates in the output
if self._contiguous[GATES_ID]:
output[:, self.gates_indices[0]:self.gates_indices[1], ...] = gates
else:
output[:, self.gates_indices, ...] = gates
next_gate = 0
# for each field size
for size in self._order:
# retrieve the needed gates
g = gates[:, next_gate:next_gate + self._nfields[size], ...].view(b, -1, 1, h, w)
input_indices = getattr(self, f"input_indices_{size}")
output_indices = getattr(self, f"output_indices_{size}")
if self._contiguous[size]:
# if the fields were contiguous, we can use slicing
output[:, output_indices[0]:output_indices[1], ...] =\
(
input[:, input_indices[0]:input_indices[1], ...]
.view(b, -1, size, h, w)
* g
).view(b, -1, h, w)
else:
# otherwise we have to use indexing
output[:, output_indices, :, :] = \
(
input[:, input_indices, ...]
.view(b, -1, size, h, w)
* g
).view(b, -1, h, w)
# shift the position on the gates fiber
next_gate += self._nfields[size]
# wrap the result in a GeometricTensor
return GeometricTensor(output, self.out_type)
def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
return b, self.out_type.size, hi, wi
def check_equivariance(self, atol: float = 1e-6, rtol: float = 1e-5) -> List[Tuple[Any, float]]:
c = self.in_type.size
x = torch.randn(3, c, 10, 10)
x = GeometricTensor(x, self.in_type)
errors = []
for el in self.space.testing_elements:
out1 = self(x).transform_fibers(el)
out2 = self(x.transform_fibers(el))
errs = (out1.tensor - out2.tensor).detach().numpy()
errs = np.abs(errs).reshape(-1)
print(el, errs.max(), errs.mean(), errs.var())
assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), \
'The error found during equivariance check with element "{}" is too high: max = {}, mean = {} var ={}' \
.format(el, errs.max(), errs.mean(), errs.var())
errors.append((el, errs.mean()))
return errors
| [
"torch.sigmoid",
"torch.LongTensor",
"torch.allclose",
"torch.empty",
"torch.randn"
] | 1.1 | steven-lang/e2cnn | 48f49760766ec958b52d0dd7b02483886dfa2096 |
1.0 | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k):
for pegasus_name, hf_name in PATTERNS:
k = k.replace(pegasus_name, hf_name)
return k
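# Worked example (the TF key below is made up for illustration): applying the
# PATTERNS replacements in order turns
#     "decoder/layer_2/memory_attention/output_proj/kernel"
# into
#     "decoder.layers.2.encoder_attn.out_proj.weight"
# i.e. the Bart-style key layout expected by the PyTorch state dict.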
# See appendix C of paper for all hyperparams
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
cfg_kwargs = DEFAULTS.copy()
cfg_kwargs.update(cfg_updates)
cfg = PegasusConfig(**cfg_kwargs)
torch_model = PegasusForConditionalGeneration(cfg)
sd = torch_model.model.state_dict()
mapping = {}
for k, v in tf_weights.items():
new_k = rename_state_dict_key(k)
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if "dense" in k or "proj" in new_k:
v = v.T
mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
mapping.update(**empty_biases)
missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
unexpected_missing = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
init_vars = tf.train.list_variables(path)
tf_weights = {}
ignore_name = ["Adafactor", "global_step"]
for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
skip_key = any([pat in name for pat in ignore_name])
if skip_key:
continue
array = tf.train.load_variable(path, name)
tf_weights[name] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
# save tokenizer first
dataset = Path(ckpt_path).parent.name
desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(save_dir)
# convert model
tf_weights = get_tf_weights_as_numpy(ckpt_path)
cfg_updates = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
cfg_updates["task_specific_params"] = task_specific_params
torch_model = convert_pegasus(tf_weights, cfg_updates)
torch_model.save_pretrained(save_dir)
sd = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight")
sd.pop("model.encoder.embed_positions.weight")
torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
if args.save_dir is None:
dataset = Path(args.tf_ckpt_path).parent.name
args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| [
"torch.zeros_like",
"torch.tensor"
] | 1.0 | suliuzh/transformers | f34372a9ff99f6bc8619ac83dc07f7afe6b92141 |
1.0 | # coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MMBT model. """
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings
from .modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from .modeling_utils import ModuleUtilsMixin
from .utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MMBTConfig"
class ModalEmbeddings(nn.Module):
"""Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if start_token is not None:
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
if end_token is not None:
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = token_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
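# Shape walk-through (a sketch; the modal sequence length k depends on the
# encoder used): self.encoder(input_modal) is assumed to return features of
# shape (batch, k, modal_hidden_size), which are projected to
# (batch, k, hidden_size); prepending a start token and appending an end token
# gives a (batch, k + 2, hidden_size) sequence that is summed with position
# and token-type embeddings before LayerNorm and dropout.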
MMBT_START_DOCSTRING = r"""
MMBT model was proposed in
`Supervised Multimodal Bitransformers for Classifying Images and Text <https://github.com/facebookresearch/mmbt>`__
by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
It's a supervised multimodal bitransformer model that fuses information from text and image encoders,
and obtains state-of-the-art performance on various multimodal classification benchmark tasks.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.MMBTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
transformer (:class: `~nn.Module`): A text transformer that is used by MMBT.
It should have embeddings, encoder, and pooler attributes.
encoder (:class: `~nn.Module`): Encoder for the second modality.
It should take in a batch of modal inputs and return k, n dimension embeddings.
"""
MMBT_INPUTS_DOCSTRING = r"""
Args:
input_modal (``torch.FloatTensor`` of shape ``(batch_size, ***)``):
The other modality data. It will be the shape that the encoder for that type expects.
e.g. With an Image Encoder, the shape would be (batch_size, channels, height, width)
input_ids (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``):
Indices of input sequence tokens in the vocabulary.
It does not expect [CLS] token to be added as it's appended to the end of other modality embeddings.
Indices can be obtained using :class:`~transformers.BertTokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
modal_start_tokens (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification
tasks.
modal_end_tokens (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.
attention_mask (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
modal_token_type_ids (`optional`) ``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``:
Segment token indices to indicate different portions of the non-text modality.
The embeddings from these tokens will be summed with the respective token embeddings for the non-text modality.
position_ids (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
modal_position_ids (``torch.LongTensor`` of shape ``(batch_size, modal_sequence_length)``, `optional`):
Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``, `optional`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
encoder_hidden_states (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MMBT Model outputting raw hidden-states without any specific head on top.",
MMBT_START_DOCSTRING,
)
class MMBTModel(nn.Module, ModuleUtilsMixin):
def __init__(self, config, transformer, encoder):
super().__init__()
self.config = config
self.transformer = transformer
self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
@add_start_docstrings_to_callable(MMBT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
mmbt = MMBTModel(config, transformer, encoder)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_txt_shape = input_ids.size()
elif inputs_embeds is not None:
input_txt_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
modal_embeddings = self.modal_encoder(
input_modal,
start_token=modal_start_tokens,
end_token=modal_end_tokens,
position_ids=modal_position_ids,
token_type_ids=modal_token_type_ids,
)
input_modal_shape = modal_embeddings.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
txt_embeddings = self.transformer.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
input_shape = embedding_output.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
else:
attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
else:
encoder_attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.transformer.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.transformer.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@add_start_docstrings(
"""MMBT Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output)""",
MMBT_START_DOCSTRING,
MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
model = MMBTForClassification(config, transformer, encoder)
outputs = model(input_modal, input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.num_labels = config.num_labels
self.mmbt = MMBTModel(config, transformer, encoder)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(
self,
input_modal,
input_ids=None,
modal_start_tokens=None,
modal_end_tokens=None,
attention_mask=None,
token_type_ids=None,
modal_token_type_ids=None,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mmbt(
input_modal=input_modal,
input_ids=input_ids,
modal_start_tokens=modal_start_tokens,
modal_end_tokens=modal_end_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
modal_token_type_ids=modal_token_type_ids,
position_ids=position_ids,
modal_position_ids=modal_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.MSELoss",
"torch.arange",
"torch.ones",
"torch.nn.CrossEntropyLoss"
] | 1.0 | suliuzh/transformers | f34372a9ff99f6bc8619ac83dc07f7afe6b92141 |
1.0 | # Because multi-GPU tests are complex and could impact other tests, and to aid debugging, we keep them in a separate module.
import logging
import os
import sys
from pathlib import Path
import pytest
import torch
from transformers.testing_utils import TestCasePlus, require_torch_multigpu
from .utils import load_json
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
CUDA_AVAILABLE = torch.cuda.is_available()
CHEAP_ARGS = {
"max_tokens_per_batch": None,
"supervise_forward": True,
"normalize_hidden": True,
"label_smoothing": 0.2,
"eval_max_gen_length": None,
"eval_beams": 1,
"val_metric": "loss",
"save_top_k": 1,
"adafactor": True,
"early_stopping_patience": 2,
"logger_name": "default",
"length_penalty": 0.5,
"cache_dir": "",
"task": "summarization",
"num_workers": 2,
"alpha_hid": 0,
"freeze_embeds": True,
"enc_only": False,
"tgt_suffix": "",
"resume_from_checkpoint": None,
"sortish_sampler": True,
"student_decoder_layers": 1,
"val_check_interval": 1.0,
"output_dir": "",
"fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
"no_teacher": False,
"fp16_opt_level": "O1",
"gpus": 1 if CUDA_AVAILABLE else 0,
"n_tpu_cores": 0,
"max_grad_norm": 1.0,
"do_train": True,
"do_predict": True,
"accumulate_grad_batches": 1,
"server_ip": "",
"server_port": "",
"seed": 42,
"model_name_or_path": "sshleifer/bart-tiny-random",
"config_name": "",
"tokenizer_name": "facebook/bart-large",
"do_lower_case": False,
"learning_rate": 0.3,
"lr_scheduler": "linear",
"weight_decay": 0.0,
"adam_epsilon": 1e-08,
"warmup_steps": 0,
"max_epochs": 1,
"train_batch_size": 2,
"eval_batch_size": 2,
"max_source_length": 12,
"max_target_length": 12,
"val_max_target_length": 12,
"test_max_target_length": 12,
"fast_dev_run": False,
"no_cache": False,
"n_train": -1,
"n_val": -1,
"n_test": -1,
"student_encoder_layers": 1,
"freeze_encoder": False,
"auto_scale_batch_size": False,
}
def _dump_articles(path: Path, articles: list):
content = "\n".join(articles)
Path(path).open("w").writelines(content)
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
def make_test_data_dir(tmp_dir):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
_dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
return tmp_dir
# XXX: a candidate for testing_utils (python>=3.6)
# https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
class RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> RunOutput:
if echo:
print(cmd)
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
await asyncio.wait(
[
_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout)),
_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
],
timeout=timeout,
)
# XXX: warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If this starts hanging, switch from `wait` to `communicate` - `wait` is easier to follow in
# real time when debugging, but `communicate` is the safer choice for normal runs
return RunOutput(await p.wait(), out, err)
def execute_async_std(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> RunOutput:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
return result
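# Usage sketch (editorial example, not part of the original test module): the helper streams a
# command's stdout/stderr live while still capturing both for assertions, e.g.:
#
# result = execute_async_std([sys.executable, "-c", "print('hi')"], timeout=60)
# assert result.returncode == 0 and result.stdout == ["hi"]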
class TestSummarizationDistillerMultiGPU(TestCasePlus):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
return cls
@require_torch_multigpu
def test_multigpu(self):
updates = dict(
no_teacher=True,
freeze_encoder=True,
gpus=2,
overwrite_output_dir=True,
sortish_sampler=True,
)
self._test_distiller_cli_fork(updates, check_contents=False)
def _test_distiller_cli_fork(self, updates, check_contents=True):
default_updates = dict(
label_smoothing=0.0,
early_stopping_patience=-1,
train_batch_size=1,
eval_batch_size=2,
max_epochs=2,
alpha_mlm=0.2,
alpha_ce=0.8,
do_predict=True,
model_name_or_path="sshleifer/tinier_bart",
teacher=CHEAP_ARGS["model_name_or_path"],
val_check_interval=0.5,
)
default_updates.update(updates)
args_d: dict = CHEAP_ARGS.copy()
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
output_dir = self.get_auto_remove_tmp_dir()
args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)
def convert(k, v):
if k in ["tgt_suffix", "server_ip", "server_port", "out", "n_tpu_cores"]:
return ""
if v is False or v is None:
return ""
if v is True: # or len(str(v))==0:
return f"--{k}"
return f"--{k}={v}"
cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)]
cmd = [sys.executable, "./examples/seq2seq/distillation.py"] + cli_args
print("\nRunning: ", " ".join(cmd))
path = Path(__file__).resolve()
examples_path = path.parents[1]
src_path = f"{path.parents[2]}/src"
env = os.environ.copy()
env["PYTHONPATH"] = f"{examples_path}:{src_path}:{env.get('PYTHONPATH', '')}"
result = execute_async_std(cmd, env=env, stdin=None, timeout=180, quiet=False, echo=False)
assert result.stdout, "produced no output"
if result.returncode > 0:
pytest.fail(f"failed with returncode {result.returncode}")
contents = os.listdir(output_dir)
contents = {os.path.basename(p) for p in contents}
ckpt_files = [p for p in contents if p.endswith("ckpt")]
assert len(ckpt_files) > 0
self.assertIn("test_generations.txt", contents)
self.assertIn("test_results.txt", contents)
# get the following from the module (we don't have access to `model` here)
metrics_save_path = os.path.join(output_dir, "metrics.json")
val_metric = "rouge2"
metrics = load_json(metrics_save_path)
# {'test': [{'test_avg_loss': 10.63731575012207, 'test_avg_rouge1': 0.0, 'test_avg_rouge2': 0.0, 'test_avg_rougeL': 0.0, 'test_avg_gen_time': 0.1822289228439331, 'test_avg_gen_len': 142.0, 'step_count': 1}]}
print(metrics)
last_step_stats = metrics["val"][-1]
self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01)
self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"])
self.assertIsInstance(last_step_stats[f"val_avg_{val_metric}"], float)
self.assertEqual(len(metrics["test"]), 1)
desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) / 2 + 1)
self.assertEqual(len(metrics["val"]), desired_n_evals)
| [
"torch.cuda.is_available"
] | 1.0 | suliuzh/transformers | f34372a9ff99f6bc8619ac83dc07f7afe6b92141 |
1.7 | import torch
import torch.nn as nn
from time import time
import numpy as np
from models.pytorch_revgrad import RevGrad
class DoubleConvBN(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.bn3 = nn.BatchNorm2d(out_channels)
self.drop1 = nn.Dropout(dropout)
self.drop2 = nn.Dropout(dropout)
self.drop3 = nn.Dropout(dropout)
def forward(self, x):
x = self.bn1(torch.relu(self.conv1(x)))
x = self.drop1(x)
identity_full = x
x = self.bn2(torch.relu(self.conv2(x)))
x = self.drop2(x)
x += identity_full
identity_1 = x
x = self.bn3(torch.relu(self.conv3(x)))
x = self.drop3(x)
x += identity_full
x += identity_1
return x
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)
self.conv3 = nn.Conv2d(
out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.drop1 = nn.Dropout(dropout)
self.drop2 = nn.Dropout(dropout)
self.drop3 = nn.Dropout(dropout)
def forward(self, x):
x = self.drop1(torch.relu(self.conv1(x)))
identity_full = x
x = self.drop2(torch.relu(self.conv2(x)))
x += identity_full
identity_1 = x
x = self.drop3(torch.relu(self.conv3(x)))
x += identity_full
x += identity_1
return x
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2), DoubleConvBN(in_channels, out_channels, kernel_size, dropout)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, kernel_size, dropout, bilinear=False):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2)
self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout) # , in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = torch.nn.functional.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class UNet(nn.Module):
def __init__(self, hparams, bilinear=False):
super(UNet, self).__init__()
self.hparams = hparams
self.n_channels = self.hparams['in_channels']
self.n_classes = self.hparams['n_classes']
self.bilinear = bilinear
factor = 2 if bilinear else 1
self.inc = DoubleConv(
self.n_channels,
self.hparams['n_filters_input'],
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down1 = Down(
self.hparams['n_filters_input'],
self.hparams['n_filters_input'] * 2,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down2 = Down(
self.hparams['n_filters_input'] * 2,
self.hparams['n_filters_input'] * 4,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down3 = Down(
self.hparams['n_filters_input'] * 4,
self.hparams['n_filters_input'] * 8,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down4 = Down(
self.hparams['n_filters_input'] * 8,
self.hparams['n_filters_input'] * 16 // factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down5 = Down(
self.hparams['n_filters_input'] * 16,
self.hparams['n_filters_input'] * 32 // factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.up1 = Up(
self.hparams['n_filters_input'] * 32,
self.hparams['n_filters_input'] * 16 // factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
bilinear,
)
self.up2 = Up(
self.hparams['n_filters_input'] * 16,
self.hparams['n_filters_input'] * 8 // factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
bilinear,
)
self.up3 = Up(
self.hparams['n_filters_input'] * 8,
self.hparams['n_filters_input'] * 4 // factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
bilinear,
)
self.up4 = Up(
self.hparams['n_filters_input'] * 4,
self.hparams['n_filters_input'] * 2,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
bilinear,
)
self.up5 = Up(
self.hparams['n_filters_input'] * 2,
self.hparams['n_filters_input'],
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
bilinear,
)
self.outc = OutConv(self.hparams['n_filters_input'], self.n_classes)
# gradient reversal layer
self.rever1_6 = RevGrad()
self.rever1_7 = RevGrad()
self.rever2_6 = RevGrad()
self.rever2_7 = RevGrad()
n_filt = self.hparams['n_filters_input'] * (2 ** 5) * 4
self.adv_fc1 = nn.Linear(n_filt, 300)
self.adv_fc2 = nn.Linear(300, 300)
self.adv_fc3 = nn.Linear(300, 300)
self.adv_fc4 = nn.Linear(300, 1)
def forward(self, x1, x2=None, train=False):
if train:
# main head (predictive)
out, decoder_x = self.predictive_network(x1)
# additional head (adversarial)
out_s = self.adversarial_network(decoder_x, x2)
return out, out_s
else:
# main head (predictive)
out, _ = self.predictive_network(x1)
return out
def encoder(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x6 = self.down5(x5)
return x1, x2, x3, x4, x5, x6
def decoder(self, x1, x2, x3, x4, x5, x6):
# x = self.up1(x5, x4)
# x = self.up2(x, x3)
# x = self.up3(x, x2)
# x = self.up4(x, x1)
x = self.up1(x6, x5)
x = self.up2(x, x4)
x = self.up3(x, x3)
x = self.up4(x, x2)
x = self.up5(x, x1)
return x
def adversarial_network(self, x, x_s):
x1, x2, x3, x4, x5, x6 = self.encoder(x_s)
# x_s = self.decoder(x1, x2, x3, x4, x5)
x6_s = self.rever1_6(x6).mean(dim=2).mean(dim=2)
x7_s = self.rever1_7(x6).std(dim=2).std(dim=2)
x6_p = self.rever2_6(x[5]).mean(dim=2).mean(dim=2)
x7_p = self.rever2_7(x[5]).std(dim=2).std(dim=2)
x = torch.cat([x6_s, x7_s, x6_p, x7_p], dim=1)
x = torch.relu(self.adv_fc1(x))
# x = torch.relu(self.adv_fc2(x))
# x = torch.relu(self.adv_fc3(x))
x = torch.sigmoid(self.adv_fc4(x))
return x
def predictive_network(self, x):
x1, x2, x3, x4, x5, x6 = self.encoder(x)
x = self.decoder(x1, x2, x3, x4, x5, x6)
logits = self.outc(x)
logits = torch.nn.functional.softmax(logits, dim=1)
return logits, [x1, x2, x3, x4, x5, x6]
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.functional.pad"
] | 1.7.0 | neurips2021vat/Variance-Aware-Training | 2dcd017ef06e81e299448bdd9da65fa682835127 |
1.7 | import torch
import torch.nn as nn
from time import time
import numpy as np
class DoubleConvBN(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.bn3 = nn.BatchNorm2d(out_channels)
self.drop1 = nn.Dropout(dropout)
self.drop2 = nn.Dropout(dropout)
self.drop3 = nn.Dropout(dropout)
def forward(self, x):
x = self.bn1(torch.relu(self.conv1(x)))
x = self.drop1(x)
identity_full = x
x = self.bn2(torch.relu(self.conv2(x)))
x = self.drop2(x)
x += identity_full
identity_1 = x
x = self.bn3(torch.relu(self.conv3(x)))
x = self.drop3(x)
x += identity_full
x += identity_1
return x
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=1, padding=0)
self.conv3 = nn.Conv2d(
out_channels, out_channels, kernel_size=kernel_size, padding=int(kernel_size / 2)
)
self.drop1 = nn.Dropout(dropout)
self.drop2 = nn.Dropout(dropout)
self.drop3 = nn.Dropout(dropout)
def forward(self, x):
x = self.drop1(torch.relu(self.conv1(x)))
identity_full = x
x = self.drop2(torch.relu(self.conv2(x)))
x += identity_full
identity_1 = x
x = self.drop3(torch.relu(self.conv3(x)))
x += identity_full
x += identity_1
return x
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels, kernel_size, dropout):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2), DoubleConvBN(in_channels, out_channels, kernel_size, dropout)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, kernel_size, dropout, bilinear=False):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2)
self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout) # , in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels, kernel_size, dropout)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = torch.nn.functional.pad(x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
class mySequential(nn.Sequential):
def forward(self, *input):
for module in self._modules.values():
input = module(*input)
return input
class Encoder_rotation(nn.Module):
def __init__(self, hparams, bilinear=False):
super(Encoder_rotation, self).__init__()
self.hparams = hparams
self.n_channels = self.hparams['in_channels']
self.emb_dim = self.hparams['emb_dim']
self.bilinear = bilinear
self.factor = 2 if bilinear else 1
self.inc = DoubleConv(
self.n_channels,
self.hparams['n_filters_input'],
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down1 = Down(
self.hparams['n_filters_input'],
self.hparams['n_filters_input'] * 2,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down2 = Down(
self.hparams['n_filters_input'] * 2,
self.hparams['n_filters_input'] * 4,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down3 = Down(
self.hparams['n_filters_input'] * 4,
self.hparams['n_filters_input'] * 8,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down4 = Down(
self.hparams['n_filters_input'] * 8,
self.hparams['n_filters_input'] * 16 // self.factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.down5 = Down(
self.hparams['n_filters_input'] * 16,
self.hparams['n_filters_input'] * 32 // self.factor,
self.hparams['kernel_size'],
self.hparams['dropout_rate'],
)
self.fc1 = nn.Linear(
self.hparams['n_filters_input'] * (2 ** 5), self.hparams['n_filters_input'] * (2 ** 5)
)
self.fc2 = nn.Linear(self.hparams['n_filters_input'] * (2 ** 5), self.hparams['n_classes'])
# self.fc3 = nn.Linear(self.hparams['n_filters_input'] * (2 ** 5), 128)#self.emb_dim)
def forward(self, x):
_, _, _, _, _, x = self.encoder(x)
x = torch.mean(x, dim=2)
x = torch.mean(x, dim=2)
x = torch.relu(self.fc1(x))
x = torch.softmax(self.fc2(x), dim=1)
# logits = self.fc3(x)
return x
def encoder(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x6 = self.down5(x5)
return x1, x2, x3, x4, x5, x6
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.functional.pad",
"torch.mean"
] | 1.7.0 | neurips2021vat/Variance-Aware-Training | 2dcd017ef06e81e299448bdd9da65fa682835127 |
1.7 | # basic libs
import numpy as np
import torch
import cv2
import imutils
# pytorch
import torch
from torch.utils.data import Dataset
import albumentations as A
# custom modules
np.random.seed(42)
class Dataset_train(Dataset):
def __init__(self, volums_list, aug, n_classes, dataset):
self.n_classes = n_classes
self.volums_list = volums_list
self.preprocessing = Preprocessing(aug, dataset)
def __len__(self):
return len(self.volums_list)
def __getitem__(self, idx):
X, y = self.load_data(idx)
X = torch.tensor(X, dtype=torch.float)
y = torch.tensor(y, dtype=torch.float)
return X, y
def load_data(self, id):
X = np.load(self.volums_list[id]).astype(np.float32)
y = np.random.choice([0, 90, 180, 270])
X = self.preprocessing.run(X=X)
X = self.rotate_image(X, y)
y_one_hot = np.zeros((4))
y_one_hot[[0, 90, 180, 270] == y] = 1
return X, y_one_hot
def one_hot_voxel(self, y):
y = np.transpose(y.astype(np.int32), (1, 2, 0))
y = np.eye(self.n_classes)[y[:, :, -1].astype(np.int32)]
y = np.transpose(y.astype(np.float32), (2, 0, 1))
return y
def rotate_image(self, image, angle):
image = np.transpose(image.astype(np.float32), (1, 2, 0))
if angle == 90:
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
elif angle == 180:
image = cv2.rotate(image, cv2.ROTATE_180)
elif angle == 270:
image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
if len(image.shape) < 3:
image = np.expand_dims(image, axis=2)
return np.transpose(image.astype(np.float32), (2, 0, 1))
# image_center = tuple(np.array(image.shape[1::-1]) / 2)
# rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
# result = cv2.warpAffine(image, rot_mat, image.shape)#[1::-1], flags=cv2.INTER_LINEAR)
# result = np.transpose(result.astype(np.float32), (2, 0, 1))
# return result
class Preprocessing:
def __init__(self, aug, dataset):
self.aug = aug
self.augmentations = Augmentations(dataset)
self.dataset = dataset
def run(self, X):
if self.dataset.find('RSNA') == -1:
X = np.transpose(X.astype(np.float32), (2, 0, 1))
if self.aug:
X = self.augmentations.run(X)
X = self.imagenet_normalize(X)
return X
def standard_scaling(self, X):
X = X.astype(np.float32)
for i in range(X.shape[0]):
std = np.std(X[i, :, :])
mean = np.mean(X[i, :, :])
if std > 0:
X[i, :, :] = (X[i, :, :] - mean) / std
else:
X[i, :, :] = X[i, :, :] - mean
return X
def imagenet_normalize(self, X):
X = X / 255.0
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
for i in range(len(mean)):
X[:, :, i] = (X[:, :, i] - mean[i]) / std[i]
return X
def minmax_scaling(self, X):
min = np.min(X)
max = np.max(X)
if max > min + 1e-3:
X = (X - min) / (max - min)
else:
X = X - np.mean(X)
return X
def padding(self, X, y):
max_shape = 256 # np.max(X.shape)
X = np.concatenate([X, np.zeros((X.shape[0], X.shape[1], X.shape[2], X.shape[3] // 2))], axis=-1)
X = np.concatenate(
[np.zeros((X.shape[0], X.shape[1], X.shape[2], max_shape - X.shape[3])), X], axis=-1
)
y = np.concatenate([y, np.zeros((y.shape[0], y.shape[1], y.shape[2], y.shape[3] // 2))], axis=-1)
y = np.concatenate(
[np.zeros((y.shape[0], y.shape[1], y.shape[2], max_shape - y.shape[3])), y], axis=-1
)
return X, y
def crop(self, X, y, cropsize=128):
X_pos = np.random.choice(X.shape[1] - cropsize)
Y_pos = np.random.choice(X.shape[2] - cropsize)
Z_pos = np.random.choice(X.shape[3] - cropsize)
X = X[:, X_pos : X_pos + cropsize, Y_pos : Y_pos + cropsize, Z_pos : Z_pos + cropsize]
y = y[:, X_pos : X_pos + cropsize, Y_pos : Y_pos + cropsize, Z_pos : Z_pos + cropsize]
return X, y
class Augmentations:
def __init__(self, dataset):
prob = 0.5
if dataset == 'HIST':
self.augs = A.Compose(
[
# A.HorizontalFlip(p=prob),
# A.VerticalFlip(p=prob),
A.Rotate(limit=5, p=prob),
# # A.GlassBlur(sigma=1),
# # A.GridDistortion(distort_limit=0.3),
# # A.ElasticTransform(alpha=0.05, p=prob),
A.RandomSizedCrop(min_max_height=(65, 80), height=96, width=96, p=prob),
A.RandomGamma(gamma_limit=(80, 120), p=prob),
# # A.RandomBrightness(limit=0.2, p=prob)
]
)
elif dataset == 'APTOS':
self.augs = A.Compose(
[
# A.HorizontalFlip(p=prob),
# A.VerticalFlip(p=prob),
A.Rotate(limit=5, p=prob),
# A.GlassBlur(sigma=1),
# A.GridDistortion(distort_limit=0.3),
# A.ElasticTransform(alpha=0.05, p=prob),
A.RandomSizedCrop(min_max_height=(180, 220), height=256, width=256, p=prob),
A.RandomGamma(gamma_limit=(80, 120), p=prob),
]
)
# A.RandomBrightness(limit=0.2, p=prob)
def run(self, image):
image = np.transpose(image.astype(np.float32), (1, 2, 0))
# apply augs
augmented = self.augs(image=image)
image = augmented['image']
image = np.transpose(image.astype(np.float32), (2, 0, 1))
return image
| [
"torch.tensor"
] | 1.7.0 | neurips2021vat/Variance-Aware-Training | 2dcd017ef06e81e299448bdd9da65fa682835127 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from contextlib import contextmanager, suppress
from functools import partial, update_wrapper
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import numpy as np
import torch
from torch.optim import Optimizer
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(
self,
trainer,
max_epochs: Optional[int],
min_epochs: Optional[int],
max_steps: Optional[int],
min_steps: Optional[int],
num_sanity_val_steps: int,
):
self.trainer = trainer
self.accumulated_loss = None
self.warning_cache = WarningCache()
self.running_loss = TensorRunningAccum(window_length=20)
self._skip_backward = False
self._optimizer_freq_cumsum = None
self._hiddens = None
self.global_step = 0
self.current_epoch = 0
self.trainer.should_stop = False
# the total batch index across all epochs
self.total_batch_idx = 0
# the current batch index in the loop that runs over the dataloader(s)
self.batch_idx = 0
# the current split index when the batch gets split into chunks in truncated backprop through time
self.split_idx = None
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
# If neither max_epochs nor max_steps is set, then use the existing default of max_epochs = 1000
self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
# If neither min_epochs nor min_steps is set, then use the existing default of min_epochs = 1
self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.max_steps = max_steps
self.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.results = ResultCollection(training=True)
@property
def num_active_optimizers(self) -> int:
return len(self.get_active_optimizers())
@property
def optimizer_freq_cumsum(self):
if self._optimizer_freq_cumsum is None:
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
return self._optimizer_freq_cumsum
def should_skip_training(self) -> bool:
should_by_max_steps = self.max_steps is not None and self.global_step >= self.max_steps
should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
self.results.to(device=self.trainer.lightning_module.device)
self.trainer.call_hook("on_train_start")
def on_train_end(self):
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving the cpu
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
# reset bookkeeping
self.trainer.state.stage = None
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.lightning_module
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.current_epoch = epoch
model = self.trainer.lightning_module
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# hook
self.trainer.logger_connector.on_epoch_start()
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]
processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
self.trainer.logger_connector.on_batch_end()
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
def reset_train_val_dataloaders(self, model) -> None:
"""
Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
"""
if self.trainer.train_dataloader is None:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
hook_overridden = self._should_add_batch_output_to_epoch_output()
if not hook_overridden:
return
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
# with 1 step (no tbptt) don't use a sequence at epoch end
if (
isinstance(opt_outputs, list) and len(opt_outputs) == 1
and not isinstance(opt_outputs[0], ResultCollection)
):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
# We add to the epoch outputs if
# 1. The model defines training_epoch_end OR
# 2. The model overrides on_train_epoch_end which has `outputs` in the signature
# TODO: in v1.5 this only needs to check if training_epoch_end is overridden
lightning_module = self.trainer.lightning_module
if is_overridden("training_epoch_end", lightning_module):
return True
if is_overridden("on_train_epoch_end", lightning_module):
model_hook_fx = getattr(lightning_module, "on_train_epoch_end")
if is_param_in_hook_signature(model_hook_fx, "outputs"):
return True
return False
def get_active_optimizers(self, batch_idx: Optional[int] = None) -> List[Tuple[int, Optimizer]]:
"""
Returns the currently active optimizers. When multiple optimizers are used with different frequencies,
only one of the optimizers is active at a time.
Returns:
A list of tuples (opt_idx, optimizer) of currently active optimizers.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
batch_idx = self.total_batch_idx if batch_idx is None else batch_idx
optimizers_loop_length = self.optimizer_freq_cumsum[-1]
current_place_in_loop = batch_idx % optimizers_loop_length
# find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = int(np.argmax(self.optimizer_freq_cumsum > current_place_in_loop))
return [(opt_idx, self.trainer.optimizers[opt_idx])]
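# Worked example (editorial sketch): with optimizer_frequencies=[2, 1] the cumulative sum is
# [2, 3], so the loop repeats every 3 batches and the first optimizer is active for two of them:
#
# freqs = np.cumsum([2, 1]) # -> array([2, 3])
# [int(np.argmax(freqs > (b % 3))) for b in range(6)] # -> [0, 0, 1, 0, 0, 1]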
def on_after_backward(self, batch_idx, untouched_loss):
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
elif self.trainer.lightning_module.automatic_optimization:
if not any((
isinstance(training_step_output, torch.Tensor),
(isinstance(training_step_output, Mapping)
and 'loss' in training_step_output), training_step_output is None
)):
raise MisconfigurationException(
"In automatic optimization, `training_step` must either return a Tensor, "
"a dict with key 'loss' or None (where the step will be skipped)."
)
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("model_forward"):
step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator.training_step(step_kwargs)
self.trainer.accelerator.post_training_step()
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
self._check_training_step_output(training_step_output)
training_step_output = self._process_training_step_output(training_step_output)
if training_step_output is None:
return
closure_loss = None
loss = None
if self.trainer.lightning_module.automatic_optimization:
# accumulate loss. if accumulate_grad_batches==1, no effect
closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
loss = closure_loss.detach().clone()
return AttributeDict(closure_loss=closure_loss, loss=loss, training_step_output=training_step_output)
def _process_training_step_output(self, training_step_output):
if training_step_output is None:
return None
results = self.results
loss = None
hiddens = None
results.extra = {}
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
if hiddens is not None:
hiddens = hiddens.detach()
results.extra = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
# map to results under the hood
results.minimize = loss
self._hiddens = hiddens
if self.trainer.move_metrics_to_cpu:
results.cpu()
return results
@staticmethod
def _prepare_outputs(
outputs: List[List[List['ResultCollection']]],
batch_mode: bool,
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``ResultCollection`` objects with dimensions:
``[optimizer outs][batch outs][tbptt steps]``.
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``ResultCollection`` objects converted to dictionaries.
All list dimensions of size one will be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
if isinstance(batch_outputs, ResultCollection):
batch_outputs = [batch_outputs]
for tbptt_output in batch_outputs:
out = tbptt_output.extra
if tbptt_output.minimize is not None:
out['loss'] = tbptt_output.minimize.detach()
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
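# Shape illustration (editorial sketch): with one optimizer, one tbptt step and two batches, the
# epoch-end input is outputs=[[r1, r2]] (dims [optimizer][batch], each r a ResultCollection).
# With batch_mode=False the singleton optimizer and tbptt dimensions are collapsed, so the call
# returns a flat list of two per-batch dicts, each holding `extra` plus a detached 'loss' entry
# whenever `minimize` was set.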
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.lightning_module
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer) -> dict:
# track gradient norms
grad_norm_dict = {}
if (self.global_step + 1) % self.trainer.log_every_n_steps == 0 and float(self.trainer.track_grad_norm) > 0:
grad_norm_dict = grad_norm(self.trainer.lightning_module, self.trainer.track_grad_norm)
# clip gradients
self.trainer.accelerator.clip_gradients(
optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
)
return grad_norm_dict
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
splits = [batch]
truncated_bptt_enabled = self._truncated_bptt_enabled()
if truncated_bptt_enabled:
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_active_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
batch_idx = None
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# hook
self.on_train_batch_end(
epoch_output,
batch_output.training_step_output,
batch,
batch_idx,
dataloader_idx,
)
# -----------------------------------------
# SAVE METRICS TO LOGGERS AND PROGRESS_BAR
# -----------------------------------------
self.trainer.logger_connector.update_train_step_metrics()
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.validating = True
self.trainer._run_evaluation()
self.trainer.training = True
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
self.update_lr_schedulers('step')
self.trainer.checkpoint_connector.has_trained = True
self.total_batch_idx += 1
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
max_steps_reached = (self.max_steps is not None and self.max_steps <= self.global_step)
if max_steps_reached or self.trainer.should_stop or self._num_training_batches_reached(is_last_batch):
break
if batch_idx is None:
# dataloader/iterator did not produce a batch
return
# handle epoch_output on epoch end
self.on_train_epoch_end(epoch_output)
# the global step is manually decreased here due to backwards compatibility with existing loggers
# as they expect that the same step is used when logging epoch end metrics even when the batch loop has
# finished. this means the attribute does not exactly track the number of optimizer steps applied.
# TODO(@carmocca): deprecate and rename so users don't get confused
self.global_step -= 1
# log epoch metrics
self.trainer.logger_connector.update_train_epoch_metrics()
self.global_step += 1
self.update_lr_schedulers('epoch')
did_train_only = self.trainer.disable_validation or self.trainer.evaluation_loop.should_skip_evaluation(
self.trainer.num_val_batches
)
if did_train_only:
self.global_step -= 1
self.check_checkpoint_callback(True)
self.global_step += 1
def on_train_epoch_end(self, epoch_output: List[List[List['ResultCollection']]]) -> None:
# inform logger the batch loop has finished
self.trainer.logger_connector.epoch_end_reached()
# prepare epoch output
processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)
# get the model and call model.training_epoch_end
model = self.trainer.lightning_module
if is_overridden('training_epoch_end', model):
# run training_epoch_end
# refresh the result for custom logging at the epoch level
model._current_fx_name = 'training_epoch_end'
training_epoch_end_output = model.training_epoch_end(processed_epoch_output)
if training_epoch_end_output is not None:
raise MisconfigurationException(
'training_epoch_end expects a return of None. '
'HINT: remove the return statement in training_epoch_end'
)
# call train epoch end hooks
self._on_train_epoch_end_hook(processed_epoch_output)
self.trainer.call_hook('on_epoch_end')
self.trainer.logger_connector.on_epoch_end()
def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
# We cannot rely on Trainer.call_hook because the signatures might be different across
# lightning module and callback
# As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`
# This implementation is copied from Trainer.call_hook
hook_name = "on_train_epoch_end"
prev_fx_name = self.trainer.lightning_module._current_fx_name
self.trainer.lightning_module._current_fx_name = hook_name
# always profile hooks
with self.trainer.profiler.profile(hook_name):
# first call trainer hook
if hasattr(self.trainer, hook_name):
trainer_hook = getattr(self.trainer, hook_name)
trainer_hook(processed_epoch_output)
# next call hook in lightningModule
model_ref = self.trainer.lightning_module
if is_overridden(hook_name, model_ref):
hook_fx = getattr(model_ref, hook_name)
if is_param_in_hook_signature(hook_fx, "outputs"):
self.warning_cache.warn(
"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
" `outputs` parameter has been deprecated."
" Support for the old signature will be removed in v1.5", DeprecationWarning
)
model_ref.on_train_epoch_end(processed_epoch_output)
else:
model_ref.on_train_epoch_end()
# call the accelerator hook
if hasattr(self.trainer.accelerator, hook_name):
accelerator_hook = getattr(self.trainer.accelerator, hook_name)
accelerator_hook()
# restore current_fx when nested context
self.trainer.lightning_module._current_fx_name = prev_fx_name
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# bookkeeping
self._hiddens = None
optimizers = list(enumerate(self.trainer.optimizers))
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(optimizers))]
if batch is None:
self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
return AttributeDict(signal=0, training_step_output=batch_outputs)
# hook
self.trainer.logger_connector.on_batch_start()
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1)
# lightning module hook
splits = self._tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
self.split_idx = split_idx
# let logger connector extract batch size
self.trainer.logger_connector.on_train_split_start(batch_idx, split_idx, split_batch)
if self.trainer.lightning_module.automatic_optimization:
for opt_idx, optimizer in self.get_active_optimizers(batch_idx):
result = self._run_optimization(batch_idx, split_batch, opt_idx, optimizer)
if result:
batch_outputs[opt_idx].append(result.training_step_output)
else:
# in manual optimization, there is no looping over optimizers
result = self._run_optimization(batch_idx, split_batch)
if result:
batch_outputs[0].append(result.training_step_output)
return AttributeDict(signal=0, training_step_output=batch_outputs)
def _run_optimization(self, batch_idx, split_batch, opt_idx=0, optimizer=None):
# TODO: In v1.5, when optimizer_idx gets removed from training_step in manual_optimization, change
# opt_idx=0 to opt_idx=None in the signature here
# toggle model params
self.run_optimization_start(opt_idx, optimizer)
result = AttributeDict()
closure = self.make_closure(split_batch, batch_idx, opt_idx, optimizer, self._hiddens, result)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
closure()
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.trainer.lightning_module.automatic_optimization:
self.optimizer_step(optimizer, opt_idx, batch_idx, closure)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.lightning_module.untoggle_optimizer(opt_idx)
else:
result = self.training_step(split_batch, batch_idx, opt_idx, self._hiddens)
if not result:
# user decided to skip optimization
return result
# update running loss + reset accumulated loss
self.update_running_loss(result.loss)
self._process_closure_result(result)
return result
def training_step_and_backward_closure(
self,
split_batch: Any,
batch_idx: int,
opt_idx: int,
optimizer: Optimizer,
hiddens,
return_result: AttributeDict,
) -> Optional[torch.Tensor]:
result = self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
if result is not None:
return_result.update(result)
return return_result.loss
def make_closure(self, *closure_args, **closure_kwargs: Any) -> Callable:
""" Wraps the training step closure into a partial object which will be called within ``optimizer.step``. """
partial_func = partial(self.training_step_and_backward_closure, *closure_args, **closure_kwargs)
return update_wrapper(partial_func, self.training_step_and_backward_closure)
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
def _process_closure_result(self, opt_closure_result: Optional[AttributeDict]) -> None:
if not opt_closure_result:
return
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(opt_closure_result.loss)
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""Wrap forward, zero_grad and backward in a closure so second order methods work"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
if is_first_batch_to_accumulate:
self.on_before_zero_grad(optimizer)
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
# backward pass
if result is not None:
with self.trainer.profiler.profile("backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(result.loss)
else:
self.warning_cache.warn(
"training_step returned None. If this was on purpose, ignore this warning..."
)
return result
def _check_finite(self, loss: torch.Tensor) -> None:
if not torch.isfinite(loss).all():
raise ValueError(f'The loss returned in `training_step` is {loss}.')
model = self.trainer.lightning_module
detect_nan_parameters(model)
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
grad_norm_dict = self.track_and_norm_grad(optimizer=optimizer)
if grad_norm_dict:
self.trainer.lightning_module._current_fx_name = "on_after_backward"
self.trainer.lightning_module.log_grad_norm(grad_norm_dict)
def update_lr_schedulers(self, interval: str) -> None:
if interval == "step":
finished_accumulation = self._accumulated_batches_reached()
finished_epoch = self._num_training_batches_reached()
if not finished_accumulation and not finished_epoch:
return
self.trainer.optimizer_connector.update_learning_rates(
interval=interval,
opt_indices=[opt_idx for opt_idx, _ in self.get_active_optimizers()],
)
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.global_step = self.trainer.accelerator.update_global_step(self.total_batch_idx, self.global_step)
def _accumulated_batches_reached(self):
return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:
""" Decide if we should run validation. """
if not self.trainer.enable_validation:
return False
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
if not is_val_check_epoch:
return False
# val_check_batch is inf for iterable datasets with no length defined
is_infinite_dataset = self.trainer.val_check_batch == float('inf')
if is_last_batch and is_infinite_dataset:
return True
if self.trainer.should_stop:
return True
# TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = is_last_batch
if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset:
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
return is_val_check_batch
def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])
lightning_module = self.trainer.lightning_module
if len(self.trainer.optimizers) > 1:
training_step_fx = getattr(lightning_module, "training_step")
has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, "optimizer_idx")
if has_opt_idx_in_train_step:
if not lightning_module.automatic_optimization:
self.warning_cache.warn(
"`training_step` hook signature has changed in v1.3."
" `optimizer_idx` argument has been removed in case of manual optimization. Support for"
" the old signature will be removed in v1.5", DeprecationWarning
)
step_kwargs['optimizer_idx'] = opt_idx
elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization:
raise ValueError(
f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
' `training_step` is missing the `optimizer_idx` argument.'
)
# pass hiddens if using tbptt
if self._truncated_bptt_enabled():
step_kwargs['hiddens'] = hiddens
return step_kwargs
def _truncated_bptt_enabled(self) -> bool:
""" Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
return self._truncated_bptt_steps() > 0
def _truncated_bptt_steps(self) -> int:
lightning_module = self.trainer.lightning_module
# Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
if lightning_module.truncated_bptt_steps > 0:
return lightning_module.truncated_bptt_steps
return self.trainer.truncated_bptt_steps or 0
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def run_optimization_start(self, opt_idx, optimizer):
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.lightning_module
model.toggle_optimizer(optimizer, opt_idx)
def update_running_loss(self, current_loss: torch.Tensor) -> None:
if self.trainer.lightning_module.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(current_loss)
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| [
"torch.isfinite"
] | 1.4 | simran2905/pytorch-lightning | 024cf23c67fb92fabb2d238bd33d73b24aafa7a9 |
1.1 | from typing import Tuple, Optional
from torch import Tensor
from torch.distributions import MultivariateNormal
import numpy as np
from torch.distributions.multivariate_normal import _batch_mv
from torch.distributions.utils import _standard_normal
def bmat_idx(*args) -> Tuple:
"""
Create indices for tensor assignment that act like slices. E.g., batch[:,[1,2,3],[1,2,3]] does not select the upper
3x3 sub-matrix over batches, but batch[bmat_idx(slice(None),[1,2,3],[1,2,3])] does.
:param args: Each arg is a sequence of integers. The first N args can be slices, and the last N args can be slices.
:return: A tuple that can be used for matrix/tensor-selection.
"""
if len(args) == 0:
return ()
elif isinstance(args[-1], slice):
# trailing slices can't be passed to np._ix, but can be appended to its results
return bmat_idx(*args[:-1]) + (args[-1],)
elif isinstance(args[0], slice):
# leading slices can't be passed to np._ix, but can be prepended to its results
return (args[0],) + bmat_idx(*args[1:])
else:
if any(isinstance(arg, slice) for arg in args[1:]):
raise ValueError("Only the first/last contiguous args can be slices, not middle args.")
return np.ix_(*args)
def deterministic_sample_mvnorm(distribution: MultivariateNormal, eps: Optional[Tensor] = None) -> Tensor:
if isinstance(eps, Tensor):
if eps.shape[-len(distribution.event_shape):] != distribution.event_shape:
raise RuntimeError(f"Expected shape ending in {distribution.event_shape}, got {eps.shape}.")
else:
shape = distribution.batch_shape + distribution.event_shape
if eps is None:
eps = 1.0
eps *= _standard_normal(shape, dtype=distribution.loc.dtype, device=distribution.loc.device)
return distribution.loc + _batch_mv(distribution._unbroadcasted_scale_tril, eps)
| [
"torch.distributions.multivariate_normal._batch_mv",
"torch.distributions.utils._standard_normal"
] | 1.1 | Suhwan-Dev/torch-kalman | f310c42e264d1642819e4c49a8b0212209a18a85 |
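# Added usage sketch (not from the original repo): contrasts plain fancy indexing with
# bmat_idx for batched sub-matrix selection, assuming bmat_idx from the module above is in scope.
import torch

batch = torch.arange(2 * 5 * 5, dtype=torch.float32).view(2, 5, 5)
rows, cols = [1, 2, 3], [1, 2, 3]
paired = batch[:, rows, cols]                       # pairs indices elementwise -> shape (2, 3)
sub = batch[bmat_idx(slice(None), rows, cols)]      # outer-product indexing -> shape (2, 3, 3)
print(paired.shape, sub.shape)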
1.1 | import torch
from torch import Tensor
from torch.nn import Parameter
class Bounded:
def __init__(self, lower: float, upper: float):
self.lower = lower
self.range = upper - lower
self.parameter = Parameter(torch.randn(1))
def get_value(self) -> Tensor:
return torch.sigmoid(self.parameter) * self.range + self.lower
| [
"torch.sigmoid",
"torch.randn"
] | 1.1 | Suhwan-Dev/torch-kalman | f310c42e264d1642819e4c49a8b0212209a18a85 |
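# Added usage sketch (assumes the Bounded helper above is in scope): the sigmoid
# reparameterization keeps an optimized scalar inside (lower, upper) while gradients
# flow through the unconstrained Parameter.
import torch

b = Bounded(lower=0.1, upper=2.0)
opt = torch.optim.SGD([b.parameter], lr=0.1)
target = torch.tensor(1.5)
for _ in range(200):
    opt.zero_grad()
    loss = (b.get_value() - target) ** 2
    loss.backward()
    opt.step()
print(float(b.get_value()))  # approaches 1.5 but can never leave (0.1, 2.0)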
1.0 | import json
import logging
import numpy as np
import os
import torch
from ..utils.various import create_missing_folders, load_and_check
logger = logging.getLogger(__name__)
class Estimator:
"""
Abstract class for any ML estimator. Subclassed by ParameterizedRatioEstimator, DoubleParameterizedRatioEstimator,
ScoreEstimator, and LikelihoodEstimator.
Each instance of this class represents one neural estimator. The most important functions are:
* `Estimator.train()` to train an estimator. The keyword `method` determines the inference technique
and whether a class instance represents a single-parameterized likelihood ratio estimator, a doubly-parameterized
likelihood ratio estimator, or a local score estimator.
* `Estimator.evaluate()` to evaluate the estimator.
* `Estimator.save()` to save the trained model to files.
* `Estimator.load()` to load the trained model from files.
Please see the tutorial for a detailed walk-through.
"""
def __init__(self, features=None, n_hidden=(100,), activation="tanh", dropout_prob=0.0):
self.features = features
self.n_hidden = n_hidden
self.activation = activation
self.dropout_prob = dropout_prob
self.model = None
self.n_observables = None
self.n_parameters = None
self.x_scaling_means = None
self.x_scaling_stds = None
def train(self, *args, **kwargs):
raise NotImplementedError()
def evaluate_log_likelihood(self, *args, **kwargs):
"""
Log likelihood estimation. Signature depends on the type of estimator. The first returned value is the log
likelihood with shape `(n_thetas, n_x)`.
"""
raise NotImplementedError()
def evaluate_log_likelihood_ratio(self, *args, **kwargs):
"""
Log likelihood ratio estimation. Signature depends on the type of estimator. The first returned value is the log
likelihood ratio with shape `(n_thetas, n_x)` or `(n_x)`.
"""
raise NotImplementedError()
def evaluate_score(self, *args, **kwargs):
"""
Score estimation. Signature depends on the type of estimator. The only returned value is the score with shape
`(n_x)`.
"""
raise NotImplementedError()
def evaluate(self, *args, **kwargs):
raise NotImplementedError()
def save(self, filename, save_model=False):
"""
        Saves the trained model to four files: a JSON file with the settings, a pickled PyTorch state dict
        file, and numpy files for the mean and standard deviation of the inputs (used for input scaling).
Parameters
----------
filename : str
            Path to the files. '_settings.json' and '_state_dict.pt' will be added.
save_model : bool, optional
If True, the whole model is saved in addition to the state dict. This is not necessary for loading it
again with Estimator.load(), but can be useful for debugging, for instance to plot the computational graph.
Returns
-------
None
"""
logger.info("Saving model to %s", filename)
if self.model is None:
raise ValueError("No model -- train or load model before saving!")
# Check paths
create_missing_folders([os.path.dirname(filename)])
# Save settings
logger.debug("Saving settings to %s_settings.json", filename)
settings = self._wrap_settings()
with open(f"{filename}_settings.json", "w") as f:
json.dump(settings, f)
# Save scaling
if self.x_scaling_stds is not None and self.x_scaling_means is not None:
logger.debug("Saving input scaling information to %s_x_means.npy and %s_x_stds.npy", filename, filename)
np.save(f"{filename}_x_means.npy", self.x_scaling_means)
np.save(f"{filename}_x_stds.npy", self.x_scaling_stds)
# Save state dict
logger.debug("Saving state dictionary to %s_state_dict.pt", filename)
torch.save(self.model.state_dict(), f"{filename}_state_dict.pt")
# Save model
if save_model:
logger.debug("Saving model to %s_model.pt", filename)
torch.save(self.model, f"{filename}_model.pt")
def load(self, filename):
"""
Loads a trained model from files.
Parameters
----------
filename : str
            Path to the files. '_settings.json' and '_state_dict.pt' will be added.
Returns
-------
None
"""
logger.info("Loading model from %s", filename)
# Load settings and create model
logger.debug("Loading settings from %s_settings.json", filename)
with open(f"{filename}_settings.json", "r") as f:
settings = json.load(f)
self._unwrap_settings(settings)
self._create_model()
# Load scaling
try:
self.x_scaling_means = np.load(f"{filename}_x_means.npy")
self.x_scaling_stds = np.load(f"{filename}_x_stds.npy")
logger.debug(
" Found input scaling information: means %s, stds %s", self.x_scaling_means, self.x_scaling_stds
)
except FileNotFoundError:
logger.warning("Scaling information not found in %s", filename)
self.x_scaling_means = None
self.x_scaling_stds = None
# Load state dict
logger.debug("Loading state dictionary from %s_state_dict.pt", filename)
self.model.load_state_dict(torch.load(f"{filename}_state_dict.pt", map_location="cpu"))
def initialize_input_transform(self, x, transform=True, overwrite=True):
if self.x_scaling_stds is not None and self.x_scaling_means is not None and not overwrite:
logger.info(
"Input rescaling already defined. To overwrite, call initialize_input_transform(x, overwrite=True)."
)
elif transform:
logger.info("Setting up input rescaling")
self.x_scaling_means = np.mean(x, axis=0)
self.x_scaling_stds = np.maximum(np.std(x, axis=0), 1.0e-6)
else:
logger.info("Disabling input rescaling")
            # one entry per observable (feature dimension), not per sample
            n_observables = x.shape[1]
            self.x_scaling_means = np.zeros(n_observables)
            self.x_scaling_stds = np.ones(n_observables)
def _transform_inputs(self, x):
if self.x_scaling_means is not None and self.x_scaling_stds is not None:
if isinstance(x, torch.Tensor):
x_scaled = x - torch.tensor(self.x_scaling_means, dtype=x.dtype, device=x.device)
x_scaled = x_scaled / torch.tensor(self.x_scaling_stds, dtype=x.dtype, device=x.device)
else:
x_scaled = x - self.x_scaling_means
x_scaled /= self.x_scaling_stds
else:
x_scaled = x
return x_scaled
def _wrap_settings(self):
settings = {
"n_observables": self.n_observables,
"n_parameters": self.n_parameters,
"features": self.features,
"n_hidden": list(self.n_hidden),
"activation": self.activation,
"dropout_prob": self.dropout_prob,
}
return settings
def _unwrap_settings(self, settings):
try:
_ = str(settings["estimator_type"])
except KeyError:
raise RuntimeError(
"Can't find estimator type information in file. Maybe this file was created with"
" an incompatible MadMiner version < v0.3.0?"
)
self.n_observables = int(settings["n_observables"])
self.n_parameters = int(settings["n_parameters"])
self.n_hidden = tuple([int(item) for item in settings["n_hidden"]])
self.activation = str(settings["activation"])
self.features = settings["features"]
if self.features == "None":
self.features = None
if self.features is not None:
self.features = [int(item) for item in self.features]
try:
self.dropout_prob = float(settings["dropout_prob"])
except KeyError:
self.dropout_prob = 0.0
logger.info(
"Can't find dropout probability in model file. Probably this file was created with an older"
" MadMiner version < 0.6.1. That's totally fine, we'll just stick to the default of 0 (no"
" dropout)."
)
def _create_model(self):
raise NotImplementedError()
def calculate_fisher_information(self, x, theta=None, weights=None, n_events=1, sum_events=True):
"""
Calculates the expected Fisher information matrix based on the kinematic information in a given number of
events.
Parameters
----------
x : str or ndarray
Sample of observations, or path to numpy file with observations. Note that this sample has to be sampled
from the reference parameter where the score is estimated with the SALLY / SALLINO estimator.
theta: None or ndarray
Numerator parameter point, or filename of a pickled numpy array. Has no effect for ScoreEstimator.
weights : None or ndarray, optional
Weights for the observations. If None, all events are taken to have equal weight. Default value: None.
n_events : float, optional
Expected number of events for which the kinematic Fisher information should be calculated. Default value: 1.
sum_events : bool, optional
If True, the expected Fisher information summed over the events x is calculated. If False, the per-event
Fisher information for each event is returned. Default value: True.
Returns
-------
fisher_information : ndarray
Expected kinematic Fisher information matrix with shape `(n_events, n_parameters, n_parameters)` if
sum_events is False or `(n_parameters, n_parameters)` if sum_events is True.
"""
if self.model is None:
raise ValueError("No model -- train or load model before evaluating it!")
# Load training data
logger.info("Loading evaluation data")
x = load_and_check(x)
n_samples = x.shape[0]
# Estimate scores
t_hats = self.evaluate_score(x=x, theta=np.array([theta for _ in x]), nuisance_mode="keep")
# Weights
if weights is None:
weights = np.ones(n_samples)
weights /= np.sum(weights)
# Calculate Fisher information
logger.info("Calculating Fisher information")
if sum_events:
fisher_information = float(n_events) * np.einsum("n,ni,nj->ij", weights, t_hats, t_hats)
else:
fisher_information = float(n_events) * np.einsum("n,ni,nj->nij", weights, t_hats, t_hats)
# Calculate expected score
expected_score = np.mean(t_hats, axis=0)
logger.debug("Expected per-event score (should be close to zero): %s", expected_score)
return fisher_information
class ConditionalEstimator(Estimator):
"""
Abstract class for estimator that is conditional on theta. Subclassed by ParameterizedRatioEstimator,
DoubleParameterizedRatioEstimator, and LikelihoodEstimator (but not ScoreEstimator).
Adds functionality to rescale parameters.
"""
def __init__(self, features=None, n_hidden=(100,), activation="tanh", dropout_prob=0.0):
super(ConditionalEstimator, self).__init__(features, n_hidden, activation, dropout_prob)
self.theta_scaling_means = None
self.theta_scaling_stds = None
def save(self, filename, save_model=False):
"""
        Saves the trained model to four files: a JSON file with the settings, a pickled PyTorch state dict
        file, and numpy files for the mean and standard deviation of the inputs (used for input scaling).
Parameters
----------
filename : str
            Path to the files. '_settings.json' and '_state_dict.pt' will be added.
save_model : bool, optional
If True, the whole model is saved in addition to the state dict. This is not necessary for loading it
again with Estimator.load(), but can be useful for debugging, for instance to plot the computational graph.
Returns
-------
None
"""
super(ConditionalEstimator, self).save(filename, save_model)
# Save param scaling
if self.theta_scaling_stds is not None and self.theta_scaling_means is not None:
logger.debug(
"Saving parameter scaling information to %s_theta_means.npy and %s_theta_stds.npy", filename, filename
)
np.save(f"{filename}_theta_means.npy", self.theta_scaling_means)
np.save(f"{filename}_theta_stds.npy", self.theta_scaling_stds)
def load(self, filename):
"""
Loads a trained model from files.
Parameters
----------
filename : str
            Path to the files. '_settings.json' and '_state_dict.pt' will be added.
Returns
-------
None
"""
super(ConditionalEstimator, self).load(filename)
# Load param scaling
try:
self.theta_scaling_means = np.load(f"{filename}_theta_means.npy")
self.theta_scaling_stds = np.load(f"{filename}_theta_stds.npy")
logger.debug(
" Found parameter scaling information: means %s, stds %s",
self.theta_scaling_means,
self.theta_scaling_stds,
)
except FileNotFoundError:
logger.warning("Parameter scaling information not found in %s", filename)
self.theta_scaling_means = None
self.theta_scaling_stds = None
def initialize_parameter_transform(self, theta, transform=True, overwrite=True):
        if self.theta_scaling_stds is not None and self.theta_scaling_means is not None and not overwrite:
logger.info(
"Parameter rescaling already defined. To overwrite, call initialize_parameter_transform(theta, overwrite=True)."
)
elif transform:
logger.info("Setting up parameter rescaling")
self.theta_scaling_means = np.mean(theta, axis=0)
self.theta_scaling_stds = np.maximum(np.std(theta, axis=0), 1.0e-6)
else:
logger.info("Disabling parameter rescaling")
self.theta_scaling_means = None
self.theta_scaling_stds = None
def _transform_parameters(self, theta):
if self.theta_scaling_means is not None and self.theta_scaling_stds is not None:
if isinstance(theta, torch.Tensor):
theta_scaled = theta - torch.tensor(self.theta_scaling_means, dtype=theta.dtype, device=theta.device)
theta_scaled = theta_scaled / torch.tensor(
self.theta_scaling_stds, dtype=theta.dtype, device=theta.device
)
else:
theta_scaled = theta - self.theta_scaling_means[np.newaxis, :]
theta_scaled /= self.theta_scaling_stds[np.newaxis, :]
else:
theta_scaled = theta
return theta_scaled
def _transform_score(self, t_xz, inverse=False):
if self.theta_scaling_means is not None and self.theta_scaling_stds is not None and t_xz is not None:
if inverse:
t_xz_scaled = t_xz / self.theta_scaling_stds[np.newaxis, :]
else:
t_xz_scaled = t_xz * self.theta_scaling_stds[np.newaxis, :]
else:
t_xz_scaled = t_xz
return t_xz_scaled
class TheresAGoodReasonThisDoesntWork(Exception):
pass
| [
"torch.save",
"torch.tensor",
"torch.load"
] | 1.0.0 | sbrass/madminer | df664344d1a43551ee9ecd91fe2dc0bccb4d529f |
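# Added illustration (plain numpy, not madminer API): the core of calculate_fisher_information
# above is a weighted outer product of per-event score estimates,
# I = n_events * sum_n w_n * t_n t_n^T, computed with np.einsum.
import numpy as np

t_hats = np.random.randn(1000, 3)        # per-event scores, shape (n_events, n_parameters)
weights = np.ones(1000) / 1000.0         # normalized event weights
n_events = 50.0                          # expected number of events
fisher = n_events * np.einsum("n,ni,nj->ij", weights, t_hats, t_hats)
print(fisher.shape)                      # (3, 3), symmetric and positive semi-definite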
1.5 | from __future__ import absolute_import
from __future__ import division
import cv2
import numpy as np
import torch
from PIL import Image, ImageOps
def dortmund_distort(img, random_limits=(0.8, 1.1)):
"""
    Creates an augmentation by computing an affine transform from three points in the
    image to three randomly perturbed points.
"""
y, x = img.shape[:2]
src_point = np.float32([[x / 2, y / 3], [2 * x / 3, 2 * y / 3], [x / 3, 2 * y / 3]])
random_shift = (np.random.rand(3, 2) - 0.5) * 2 * (
random_limits[1] - random_limits[0]
) / 2 + np.mean(random_limits)
dst_point = src_point * random_shift.astype(np.float32)
transform = cv2.getAffineTransform(src_point, dst_point)
if img.ndim == 3:
border_value = np.median(
np.reshape(img, (img.shape[0] * img.shape[1], -1)), axis=0
)
else:
border_value = float(np.median(img))
return cv2.warpAffine(img, transform, dsize=(x, y), borderValue=border_value)
class DortmundImageToTensor(object):
def __init__(
self, fixed_height=None, fixed_width=None, min_height=None, min_width=None
):
assert fixed_height is None or fixed_height > 0
assert fixed_width is None or fixed_width > 0
assert min_height is None or min_height > 0
assert min_width is None or min_width > 0
self._fh = fixed_height
self._fw = fixed_width
self._mh = min_height
self._mw = min_width
def __call__(self, x):
assert isinstance(x, Image.Image)
x = x.convert("L")
x = ImageOps.invert(x)
if self._fh or self._fw:
# Optionally, resize image to a fixed size
cw, ch = x.size
nw = self._fw if self._fw else int(cw * self._fh / ch)
nh = self._fh if self._fh else int(ch * self._fw / cw)
            x = x.resize((nw, nh), Image.BILINEAR)  # PIL resize returns a new image
elif self._mh or self._mw:
# Optionally, pad image to have the minimum size
cw, ch = x.size
nw = cw if self._mw is None or cw >= self._mw else self._mw
nh = ch if self._mh is None or ch >= self._mh else self._mh
if cw != nw or ch != nh:
nx = Image.new("L", (nw, nh))
nx.paste(x, ((nw - cw) // 2, (nh - ch) // 2))
x = nx
x = np.asarray(x, dtype=np.float32)
x = dortmund_distort(x / 255.0)
        if x.ndim != 3:
x = np.expand_dims(x, axis=-1)
x = np.transpose(x, (2, 0, 1))
return torch.from_numpy(x)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import laia.random
from laia.data import TextImageFromTextTableDataset, ImageDataLoader
from laia.plugins.arguments import add_argument, add_defaults, args
add_defaults("seed")
add_argument("--num_images", type=int, help="Show only this number of images")
add_argument("--shuffle", action="store_true", help="Shuffle the list of images")
add_argument("img_dir", help="Directory containing images")
add_argument("txt_table", help="Transcriptions of each image")
args = args()
laia.random.manual_seed(args.seed)
dataset = TextImageFromTextTableDataset(
args.txt_table, args.img_dir, img_transform=DortmundImageToTensor()
)
dataset_loader = ImageDataLoader(
dataset=dataset, image_channels=1, shuffle=args.shuffle
)
for i, batch in enumerate(dataset_loader, 1):
if args.num_images and i > args.num_images:
break
# Note: batch['img'] is a PaddedTensor
img = batch["img"].data.squeeze().numpy()
imgplt = plt.imshow(img, cmap="gray")
imgplt.axes.set_title(" ".join(batch["txt"][0]))
plt.show()
| [
"torch.from_numpy"
] | 1.5.0 | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 |
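# Added standalone sketch (no laia dependency, synthetic data only): applies dortmund_distort
# from the module above to a toy grayscale image to show the augmentation's in/out shapes.
import numpy as np

img = np.zeros((64, 128), dtype=np.float32)
img[20:40, 30:90] = 1.0                              # white rectangle on a black background
warped = dortmund_distort(img, random_limits=(0.8, 1.1))
print(img.shape, warped.shape)                       # both (64, 128); content is randomly warped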
1.8 | """Torch module for GCN."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from grb.utils.normalize import GCNAdjNorm
class GCN(nn.Module):
r"""
Description
-----------
Graph Convolutional Networks (`GCN <https://arxiv.org/abs/1609.02907>`__)
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
hidden_features : int or list of int
Dimension of hidden features. List if multi-layer.
n_layers : int
Number of layers.
layer_norm : bool, optional
Whether to use layer normalization. Default: ``False``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``torch.nn.functional.relu``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
feat_norm : str, optional
Type of features normalization, choose from ["arctan", "tanh", None]. Default: ``None``.
adj_norm_func : func of utils.normalize, optional
Function that normalizes adjacency matrix. Default: ``GCNAdjNorm``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
def __init__(self,
in_features,
out_features,
hidden_features,
n_layers,
activation=F.relu,
layer_norm=False,
residual=False,
feat_norm=None,
adj_norm_func=GCNAdjNorm,
dropout=0.0):
super(GCN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.adj_norm_func = adj_norm_func
if type(hidden_features) is int:
hidden_features = [hidden_features] * (n_layers - 1)
elif type(hidden_features) is list or type(hidden_features) is tuple:
assert len(hidden_features) == (n_layers - 1), "Incompatible sizes between hidden_features and n_layers."
n_features = [in_features] + hidden_features + [out_features]
self.layers = nn.ModuleList()
for i in range(n_layers):
if layer_norm:
self.layers.append(nn.LayerNorm(n_features[i]))
self.layers.append(GCNConv(in_features=n_features[i],
out_features=n_features[i + 1],
activation=activation if i != n_layers - 1 else None,
residual=residual if i != n_layers - 1 else False,
dropout=dropout if i != n_layers - 1 else 0.0))
self.reset_parameters()
@property
def model_type(self):
"""Indicate type of implementation."""
return "torch"
@property
def model_name(self):
return "gcn"
def reset_parameters(self):
"""Reset parameters."""
for layer in self.layers:
layer.reset_parameters()
def forward(self, x, adj):
r"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of model (logits without activation).
"""
for layer in self.layers:
if isinstance(layer, nn.LayerNorm):
x = layer(x)
else:
x = layer(x, adj)
return x
class GCNGC(nn.Module):
r"""
Description
-----------
Graph Convolutional Networks (`GCN <https://arxiv.org/abs/1609.02907>`__)
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
hidden_features : int or list of int
Dimension of hidden features. List if multi-layer.
n_layers : int
Number of layers.
layer_norm : bool, optional
Whether to use layer normalization. Default: ``False``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``torch.nn.functional.relu``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
feat_norm : str, optional
Type of features normalization, choose from ["arctan", "tanh", None]. Default: ``None``.
adj_norm_func : func of utils.normalize, optional
Function that normalizes adjacency matrix. Default: ``GCNAdjNorm``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
def __init__(self,
in_features,
out_features,
hidden_features,
n_layers,
activation=F.relu,
layer_norm=False,
residual=False,
feat_norm=None,
adj_norm_func=GCNAdjNorm,
dropout=0.0):
super(GCNGC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.adj_norm_func = adj_norm_func
if type(hidden_features) is int:
hidden_features = [hidden_features] * (n_layers - 1)
elif type(hidden_features) is list or type(hidden_features) is tuple:
assert len(hidden_features) == (n_layers - 1), "Incompatible sizes between hidden_features and n_layers."
n_features = [in_features] + hidden_features
self.layers = nn.ModuleList()
for i in range(n_layers - 1):
if layer_norm:
self.layers.append(nn.LayerNorm(n_features[i]))
self.layers.append(GCNConv(in_features=n_features[i],
out_features=n_features[i + 1],
activation=activation,
residual=residual,
dropout=dropout))
self.linear = nn.Linear(hidden_features[-1], out_features)
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
@property
def model_type(self):
"""Indicate type of implementation."""
return "torch"
@property
def model_name(self):
return "gcn"
def reset_parameters(self):
"""Reset parameters."""
for layer in self.layers:
layer.reset_parameters()
def forward(self, x, adj, batch_index=None):
r"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of model (logits without activation).
"""
for layer in self.layers:
if isinstance(layer, nn.LayerNorm):
x = layer(x)
else:
x = layer(x, adj)
if batch_index is not None:
batch_size = int(torch.max(batch_index)) + 1
out = torch.zeros(batch_size, x.shape[1]).to(x.device)
out = out.scatter_add_(dim=0, index=batch_index.view(-1, 1).repeat(1, x.shape[1]), src=x)
else:
out = torch.sum(x, dim=0)
out = self.dropout(self.linear(out))
return out
class GCNConv(nn.Module):
r"""
Description
-----------
GCN convolutional layer.
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
activation : func of torch.nn.functional, optional
Activation function. Default: ``None``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
def __init__(self,
in_features,
out_features,
activation=None,
residual=False,
dropout=0.0):
super(GCNConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
if residual:
self.residual = nn.Linear(in_features, out_features)
else:
self.residual = None
self.activation = activation
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reset_parameters()
def reset_parameters(self):
"""Reset parameters."""
if self.activation == F.leaky_relu:
gain = nn.init.calculate_gain('leaky_relu')
else:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.linear.weight, gain=gain)
def forward(self, x, adj):
r"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of layer.
"""
        if self.residual is not None:
            # project the layer input so the skip connection matches out_features
            residual = self.residual(x)
        else:
            residual = None
        x = self.linear(x)
        x = torch.sparse.mm(adj, x)
        if self.activation is not None:
            x = self.activation(x)
        if residual is not None:
            x = x + residual
if self.dropout is not None:
x = self.dropout(x)
return x
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleList",
"torch.max",
"torch.sum",
"torch.nn.init.calculate_gain",
"torch.nn.init.xavier_normal_",
"torch.sparse.mm"
] | 1.8.0 | sigeisler/grb | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
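# Added usage sketch (assumes the GCN class above is in scope; adjacency normalization via
# GCNAdjNorm is skipped here, so this only checks the forward pass, not a training recipe).
import torch

n_nodes, in_feats, n_classes = 6, 8, 3
x = torch.randn(n_nodes, in_feats)
edges = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5] + list(range(n_nodes)),
                      [1, 0, 2, 1, 3, 2, 4, 3, 5, 4] + list(range(n_nodes))])
adj = torch.sparse_coo_tensor(edges, torch.ones(edges.shape[1]), (n_nodes, n_nodes))
model = GCN(in_features=in_feats, out_features=n_classes, hidden_features=16, n_layers=2)
print(model(x, adj).shape)  # torch.Size([6, 3])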
1.8 | import dgl
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GATConv
from grb.utils.normalize import GCNAdjNorm
class GAT(nn.Module):
r"""
Description
-----------
Graph Attention Networks (`GAT <https://arxiv.org/abs/1710.10903>`__)
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
hidden_features : int or list of int
Dimension of hidden features. List if multi-layer.
n_layers : int
Number of layers.
layer_norm : bool, optional
Whether to use layer normalization. Default: ``False``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``torch.nn.functional.leaky_relu``.
feat_norm : str, optional
Type of features normalization, choose from ["arctan", "tanh", None]. Default: ``None``.
adj_norm_func : func of utils.normalize, optional
Function that normalizes adjacency matrix. Default: ``None``.
feat_dropout : float, optional
Dropout rate for input features. Default: ``0.0``.
attn_dropout : float, optional
Dropout rate for attention. Default: ``0.0``.
residual : bool, optional
Whether to use residual connection. Default: ``False``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
def __init__(self,
in_features,
out_features,
hidden_features,
n_layers,
n_heads,
activation=F.leaky_relu,
layer_norm=False,
feat_norm=None,
adj_norm_func=None,
feat_dropout=0.0,
attn_dropout=0.0,
residual=False,
dropout=0.0):
super(GAT, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.adj_norm_func = adj_norm_func
if type(hidden_features) is int:
hidden_features = [hidden_features] * (n_layers - 1)
elif type(hidden_features) is list or type(hidden_features) is tuple:
assert len(hidden_features) == (n_layers - 1), "Incompatible sizes between hidden_features and n_layers."
n_features = [in_features] + hidden_features + [out_features]
self.layers = nn.ModuleList()
for i in range(n_layers):
if layer_norm:
if i == 0:
self.layers.append(nn.LayerNorm(n_features[i]))
else:
self.layers.append(nn.LayerNorm(n_features[i] * n_heads))
self.layers.append(GATConv(in_feats=n_features[i] * n_heads if i != 0 else n_features[i],
out_feats=n_features[i + 1],
num_heads=n_heads if i != n_layers - 1 else 1,
feat_drop=feat_dropout if i != n_layers - 1 else 0.0,
attn_drop=attn_dropout if i != n_layers - 1 else 0.0,
residual=residual if i != n_layers - 1 else False,
activation=activation if i != n_layers - 1 else None))
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
@property
def model_type(self):
return "dgl"
@property
def model_name(self):
return "gat"
def forward(self, x, adj):
r"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of layer.
"""
graph = dgl.from_scipy(adj).to(x.device)
graph = dgl.remove_self_loop(graph)
graph = dgl.add_self_loop(graph)
graph.ndata['features'] = x
for i, layer in enumerate(self.layers):
if isinstance(layer, nn.LayerNorm):
x = layer(x)
else:
x = layer(graph, x).flatten(1)
if i != len(self.layers) - 1:
if self.dropout is not None:
x = self.dropout(x)
return x
| [
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleList"
] | 1.8.0 | sigeisler/grb | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
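# Added usage sketch (assumes dgl and scipy are installed and the GAT class above is in scope).
# Unlike the GCN above, GAT.forward expects a scipy sparse adjacency, since it calls
# dgl.from_scipy internally.
import numpy as np
import scipy.sparse as sp
import torch

n_nodes, in_feats = 5, 8
x = torch.randn(n_nodes, in_feats)
adj = sp.coo_matrix((np.ones(4), ([0, 1, 2, 3], [1, 2, 3, 4])), shape=(n_nodes, n_nodes))
model = GAT(in_features=in_feats, out_features=3, hidden_features=16, n_layers=2, n_heads=4)
print(model(x, adj).shape)  # torch.Size([5, 3])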
1.8 | """Torch module for APPNP."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from grb.utils.normalize import GCNAdjNorm
class APPNP(nn.Module):
r"""
Description
-----------
Approximated Personalized Propagation of Neural Predictions (`APPNP <https://arxiv.org/abs/1810.05997>`__)
Parameters
----------
in_features : int
Dimension of input features.
out_features : int
Dimension of output features.
hidden_features : int or list of int
Dimension of hidden features. List if multi-layer.
n_layers : int
Number of layers.
layer_norm : bool, optional
Whether to use layer normalization. Default: ``False``.
activation : func of torch.nn.functional, optional
Activation function. Default: ``torch.nn.functional.relu``.
feat_norm : str, optional
Type of features normalization, choose from ["arctan", "tanh", None]. Default: ``None``.
adj_norm_func : func of utils.normalize, optional
Function that normalizes adjacency matrix. Default: ``GCNAdjNorm``.
edge_drop : float, optional
Rate of edge drop.
alpha : float, optional
Hyper-parameter, refer to original paper. Default: ``0.01``.
k : int, optional
Hyper-parameter, refer to original paper. Default: ``10``.
dropout : float, optional
Dropout rate during training. Default: ``0.0``.
"""
def __init__(self,
in_features,
out_features,
hidden_features,
n_layers,
layer_norm=False,
activation=F.relu,
edge_drop=0.0,
alpha=0.01,
k=10,
feat_norm=None,
adj_norm_func=GCNAdjNorm,
dropout=0.0):
super(APPNP, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.feat_norm = feat_norm
self.adj_norm_func = adj_norm_func
if type(hidden_features) is int:
hidden_features = [hidden_features] * (n_layers - 1)
elif type(hidden_features) is list or type(hidden_features) is tuple:
assert len(hidden_features) == (n_layers - 1), "Incompatible sizes between hidden_features and n_layers."
n_features = [in_features] + hidden_features + [out_features]
self.layers = nn.ModuleList()
for i in range(n_layers):
if layer_norm:
self.layers.append(nn.LayerNorm(n_features[i]))
self.layers.append(nn.Linear(n_features[i], n_features[i + 1]))
self.alpha = alpha
self.k = k
self.activation = activation
if edge_drop > 0.0:
self.edge_dropout = SparseEdgeDrop(edge_drop)
else:
self.edge_dropout = None
if dropout > 0.0:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
@property
def model_type(self):
"""Indicate type of implementation."""
return "torch"
@property
def model_name(self):
return "appnp"
def reset_parameters(self):
"""Reset parameters."""
for layer in self.layers:
layer.reset_parameters()
def forward(self, x, adj):
r"""
Parameters
----------
x : torch.Tensor
Tensor of input features.
adj : torch.SparseTensor
Sparse tensor of adjacency matrix.
Returns
-------
x : torch.Tensor
Output of model (logits without activation).
"""
for layer in self.layers:
if isinstance(layer, nn.LayerNorm):
x = layer(x)
else:
x = layer(x)
x = self.activation(x)
if self.dropout is not None:
x = self.dropout(x)
for i in range(self.k):
if self.edge_dropout is not None and self.training:
adj = self.edge_dropout(adj)
x = (1 - self.alpha) * torch.spmm(adj, x) + self.alpha * x
return x
class SparseEdgeDrop(nn.Module):
r"""
Description
-----------
Sparse implementation of edge drop.
Parameters
----------
edge_drop : float
Rate of edge drop.
"""
def __init__(self, edge_drop):
super(SparseEdgeDrop, self).__init__()
self.edge_drop = edge_drop
def forward(self, adj):
"""Sparse edge drop"""
mask = ((torch.rand(adj._values().size()) + self.edge_drop) > 1.0)
rc = adj._indices()
val = adj._values().clone()
val[mask] = 0.0
return torch.sparse.FloatTensor(rc, val)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleList",
"torch.sparse.FloatTensor",
"torch.spmm"
] | 1.8.0 | sigeisler/grb | c89e21076dc05d1edb87dfe2eff20c29ba6bd0c1 |
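# Added toy illustration (dense tensors only) of the propagation loop in APPNP.forward above;
# note the implementation mixes each step with the *current* features rather than the initial
# MLP output.
import torch

alpha, k = 0.1, 10
adj = torch.eye(4) * 0.5 + 0.125     # stand-in for a normalized adjacency (rows sum to 1)
x = torch.randn(4, 3)                # per-node predictions from the MLP layers
for _ in range(k):
    x = (1 - alpha) * adj @ x + alpha * x   # same update as the torch.spmm line above
print(x.shape)                       # torch.Size([4, 3])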
1.10 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 10:33:24 2021
@author: Jose Antonio
"""
#of the paper Towards Char... using GNNs
import torch_geometric.nn as pyg_nn
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter.composite import scatter_softmax
class DiscriminativeModel(nn.Module):
def __init__(self, dim_input,
hidden_dim,dropout,
vocab_nodes,
vocab_edges):
super(DiscriminativeModel, self).__init__()
self.emb_nodes = nn.Embedding(len(vocab_nodes), dim_input)
self.conv_1 = pyg_nn.RGCNConv(in_channels = dim_input, out_channels = hidden_dim,
num_relations = len(vocab_edges))
self.conv_2 = pyg_nn.RGCNConv(in_channels = hidden_dim, out_channels = hidden_dim,
num_relations = len(vocab_edges))
self.d_1 = nn.Dropout(dropout)
self.lin = nn.Linear(hidden_dim, 1)
self.attention_vector = nn.Linear(hidden_dim,1,bias=False)
def forward(self,nodeTypes,edge_index, edge_attr, bs):
nodeTypes = self.emb_nodes(nodeTypes)
nodes_mess_1 = self.conv_1(nodeTypes, edge_index, edge_attr)
nodes_mess_1 = self.d_1(F.relu(nodes_mess_1))
nodes_mess_1 = F.relu(self.conv_2(nodes_mess_1, edge_index, edge_attr))
attentions = scatter_softmax(torch.squeeze(self.attention_vector(nodes_mess_1)), bs)
nodes_mess_1 = torch.unsqueeze(attentions,dim=1) * nodes_mess_1
graph_emb = pyg_nn.global_add_pool(nodes_mess_1, bs)
rtu = self.lin(graph_emb)
return F.sigmoid(rtu)
def getAttentions(self,nodeTypes,edge_index, edge_attr, bs):
nodeTypes = self.emb_nodes(nodeTypes)
nodes_mess_1 = self.conv_1(nodeTypes, edge_index, edge_attr)
nodes_mess_1 = self.d_1(F.relu(nodes_mess_1))
nodes_mess_1 = F.relu(self.conv_2(nodes_mess_1, edge_index, edge_attr))
attentions = scatter_softmax(torch.squeeze(self.attention_vector(nodes_mess_1)), bs)
return attentions
| [
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.Dropout",
"torch.unsqueeze",
"torch.nn.functional.relu"
] | 1.10.1 | Antolin1/DMG-Python | ba3942e13006e1a32f3fe9f1b29615311f667274 |
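# Added toy sketch of the graph-level readout used in DiscriminativeModel above: per-node
# attention scores are normalized within each graph by scatter_softmax and then weight an
# additive pooling (requires torch_scatter).
import torch
from torch_scatter.composite import scatter_softmax

scores = torch.tensor([0.2, 1.5, -0.3, 0.7])   # one attention logit per node
batch = torch.tensor([0, 0, 1, 1])             # node-to-graph assignment
attn = scatter_softmax(scores, batch)          # softmax taken separately inside each graph
print(attn)                                    # entries of each graph sum to 1.0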
1.4 | # AdamP
# Copyright (c) 2020-present NAVER Corp.
# MIT license
import torch
from torch.optim.optimizer import Optimizer
import math
class AdamP(Optimizer):
"""
Paper: "AdamP: Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights"
Copied from https://github.com/clovaai/AdamP/
Copyright (c) 2020 Naver Corp.
MIT License
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
super(AdamP, self).__init__(params, defaults)
def _channel_view(self, x):
return x.view(x.size(0), -1)
def _layer_view(self, x):
return x.view(1, -1)
def _cosine_similarity(self, x, y, eps, view_func):
x = view_func(x)
y = view_func(y)
x_norm = x.norm(dim=1).add_(eps)
y_norm = y.norm(dim=1).add_(eps)
dot = (x * y).sum(dim=1)
return dot.abs() / x_norm / y_norm
def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
wd = 1
expand_size = [-1] + [1] * (len(p.shape) - 1)
for view_func in [self._channel_view, self._layer_view]:
cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
wd = wd_ratio
return perturb, wd
return perturb, wd
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
beta1, beta2 = group['betas']
nesterov = group['nesterov']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
# Adam
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
if nesterov:
perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
else:
perturb = exp_avg / denom
# Projection
wd_ratio = 1
if len(p.shape) > 1:
perturb, wd_ratio = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'],
group['eps'])
# Weight decay
if group['weight_decay'] > 0:
p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio)
# Step
p.data.add_(-step_size, perturb)
return loss
| [
"torch.zeros_like"
] | 1.4.0 | Rhcsky/KoSpeech | dbff78140d150dcc71d14d65f81c011847e9574d |
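# Added usage sketch (assumes the AdamP class above is in scope): AdamP is a drop-in
# replacement for Adam, so it is built from model.parameters() like any torch optimizer.
import torch
import torch.nn as nn

model = nn.Linear(16, 4)
optimizer = AdamP(model.parameters(), lr=1e-3, weight_decay=1e-2, nesterov=True)
x, y = torch.randn(8, 16), torch.randint(0, 4, (8,))
loss = nn.CrossEntropyLoss()(model(x), y)
optimizer.zero_grad()
loss.backward()
optimizer.step()  # the projection step only applies to parameters with more than one dimension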
1.4 | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple
from kospeech.models.modules import Linear
from kospeech.models.encoder import (
BaseEncoder,
TransducerEncoder,
)
from kospeech.models.decoder import (
BaseDecoder,
TransducerDecoder,
)
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def count_parameters(self) -> int:
""" Count parameters of encoder """
return sum([p.numel for p in self.parameters()])
def update_dropout(self, dropout_p: float) -> None:
""" Update dropout probability of encoder """
for name, child in self.named_children():
if isinstance(child, nn.Dropout):
child.p = dropout_p
@torch.no_grad()
def recognize(self, inputs: Tensor, input_lengths: Tensor):
raise NotImplementedError
class EncoderModel(BaseModel):
""" Super class of KoSpeech's Encoder only Models """
def __init__(self):
super(EncoderModel, self).__init__()
self.decoder = None
def set_decoder(self, decoder):
""" Setter for decoder """
self.decoder = decoder
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]:
"""
Forward propagate a `inputs` for ctc training.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
(Tensor, Tensor):
                * predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
* output_lengths (torch.LongTensor): The length of output tensor ``(batch)``
"""
raise NotImplementedError
@torch.no_grad()
def decode(self, predicted_log_probs: Tensor) -> Tensor:
"""
Decode encoder_outputs.
Args:
            predicted_log_probs (torch.FloatTensor): Log probability of model predictions. `FloatTensor` of size
``(batch, seq_length, dimension)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
return predicted_log_probs.max(-1)[1]
@torch.no_grad()
def recognize(self, inputs: Tensor, input_lengths: Tensor) -> Tensor:
"""
Recognize input speech.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
predicted_log_probs, _ = self.forward(inputs, input_lengths)
if self.decoder is not None:
return self.decoder.decode(predicted_log_probs)
return self.decode(predicted_log_probs)
class EncoderDecoderModel(BaseModel):
""" Super class of KoSpeech's Encoder-Decoder Models """
def __init__(self, encoder: BaseEncoder, decoder: BaseDecoder) -> None:
super(EncoderDecoderModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def set_encoder(self, encoder):
""" Setter for encoder """
self.encoder = encoder
def set_decoder(self, decoder):
""" Setter for decoder """
self.decoder = decoder
def count_parameters(self) -> int:
""" Count parameters of encoder """
num_encoder_parameters = self.encoder.count_parameters()
num_decoder_parameters = self.decoder.count_parameters()
return num_encoder_parameters + num_decoder_parameters
def update_dropout(self, dropout_p) -> None:
""" Update dropout probability of model """
self.encoder.update_dropout(dropout_p)
self.decoder.update_dropout(dropout_p)
def forward(
self,
inputs: Tensor,
input_lengths: Tensor,
targets: Tensor,
*args,
) -> Tuple[Tensor, Tensor, Tensor]:
"""
Forward propagate a `inputs` and `targets` pair for training.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
            targets (torch.LongTensor): A target sequence passed to the decoder. `IntTensor` of size ``(batch, seq_length)``
Returns:
(Tensor, Tensor, Tensor)
* predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
* encoder_output_lengths: The length of encoder outputs. ``(batch)``
* encoder_log_probs: Log probability of encoder outputs will be passed to CTC Loss.
If joint_ctc_attention is False, return None.
"""
raise NotImplementedError
@torch.no_grad()
def recognize(self, inputs: Tensor, input_lengths: Tensor) -> Tensor:
"""
Recognize input speech. This method consists of the forward of the encoder and the decode() of the decoder.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
encoder_outputs, encoder_output_lengths, _ = self.encoder(inputs, input_lengths)
return self.decoder.decode(encoder_outputs, encoder_output_lengths)
class TransducerModel(BaseModel):
""" Super class of KoSpeech's Transducer Models """
def __init__(
self,
encoder: TransducerEncoder,
decoder: TransducerDecoder,
d_model: int,
num_classes: int,
) -> None:
super(TransducerModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.fc = Linear(d_model << 1, num_classes, bias=False)
def set_encoder(self, encoder):
""" Setter for encoder """
self.encoder = encoder
def set_decoder(self, decoder):
""" Setter for decoder """
self.decoder = decoder
def count_parameters(self) -> int:
""" Count parameters of encoder """
num_encoder_parameters = self.encoder.count_parameters()
num_decoder_parameters = self.decoder.count_parameters()
return num_encoder_parameters + num_decoder_parameters
def update_dropout(self, dropout_p) -> None:
""" Update dropout probability of model """
self.encoder.update_dropout(dropout_p)
self.decoder.update_dropout(dropout_p)
def joint(self, encoder_outputs: Tensor, decoder_outputs: Tensor) -> Tensor:
"""
Joint `encoder_outputs` and `decoder_outputs`.
Args:
            encoder_outputs (torch.FloatTensor): An output sequence of the encoder. `FloatTensor` of size
                ``(batch, seq_length, dimension)``
            decoder_outputs (torch.FloatTensor): An output sequence of the decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
Returns:
* outputs (torch.FloatTensor): outputs of joint `encoder_outputs` and `decoder_outputs`..
"""
if encoder_outputs.dim() == 3 and decoder_outputs.dim() == 3:
input_length = encoder_outputs.size(1)
target_length = decoder_outputs.size(1)
encoder_outputs = encoder_outputs.unsqueeze(2)
decoder_outputs = decoder_outputs.unsqueeze(1)
encoder_outputs = encoder_outputs.repeat([1, 1, target_length, 1])
decoder_outputs = decoder_outputs.repeat([1, input_length, 1, 1])
outputs = torch.cat((encoder_outputs, decoder_outputs), dim=-1)
outputs = self.fc(outputs).log_softmax(dim=-1)
return outputs
def forward(
self,
inputs: Tensor,
input_lengths: Tensor,
targets: Tensor,
target_lengths: Tensor,
) -> Tensor:
"""
Forward propagate a `inputs` and `targets` pair for training.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
            targets (torch.LongTensor): A target sequence passed to the decoder. `IntTensor` of size ``(batch, seq_length)``
target_lengths (torch.LongTensor): The length of target tensor. ``(batch)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
encoder_outputs, _ = self.encoder(inputs, input_lengths)
decoder_outputs, _ = self.decoder(targets, target_lengths)
return self.joint(encoder_outputs, decoder_outputs)
@torch.no_grad()
def decode(self, encoder_output: Tensor, max_length: int) -> Tensor:
"""
Decode `encoder_outputs`.
Args:
            encoder_output (torch.FloatTensor): An output sequence of the encoder. `FloatTensor` of size
``(seq_length, dimension)``
max_length (int): max decoding time step
Returns:
* predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
"""
pred_tokens, hidden_state = list(), None
decoder_input = encoder_output.new_tensor([[self.decoder.sos_id]], dtype=torch.long)
for t in range(max_length):
decoder_output, hidden_state = self.decoder(decoder_input, hidden_states=hidden_state)
step_output = self.joint(encoder_output[t].view(-1), decoder_output.view(-1))
step_output = step_output.softmax(dim=0)
pred_token = step_output.argmax(dim=0)
pred_token = int(pred_token.item())
pred_tokens.append(pred_token)
decoder_input = step_output.new_tensor([[pred_token]], dtype=torch.long)
return torch.LongTensor(pred_tokens)
@torch.no_grad()
def recognize(self, inputs: Tensor, input_lengths: Tensor) -> Tensor:
"""
Recognize input speech. This method consists of the forward of the encoder and the decode() of the decoder.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* outputs (torch.FloatTensor): Result of model predictions.
"""
outputs = list()
encoder_outputs, output_lengths = self.encoder(inputs, input_lengths)
max_length = encoder_outputs.size(1)
for encoder_output in encoder_outputs:
decoded_seq = self.decode(encoder_output, max_length)
outputs.append(decoded_seq)
outputs = torch.stack(outputs, dim=1).transpose(0, 1)
return outputs
| [
"torch.no_grad",
"torch.cat",
"torch.LongTensor",
"torch.stack"
] | 1.4.0 | Rhcsky/KoSpeech | dbff78140d150dcc71d14d65f81c011847e9574d |
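# Added shape-only illustration of TransducerModel.joint above, using plain tensors: encoder
# frames (T) and decoder steps (U) are broadcast against each other before the joint network,
# producing a (batch, T, U, num_classes) lattice.
import torch

batch, T, U, d = 2, 5, 3, 8
enc = torch.randn(batch, T, d).unsqueeze(2).repeat(1, 1, U, 1)   # (batch, T, U, d)
dec = torch.randn(batch, U, d).unsqueeze(1).repeat(1, T, 1, 1)   # (batch, T, U, d)
joint_in = torch.cat((enc, dec), dim=-1)                         # (batch, T, U, 2 * d)
print(joint_in.shape)                                            # torch.Size([2, 5, 3, 16])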
1.9 | # %% [markdown]
# ## Text Classification with LM-BFF.
# In this tutorial, we do sentiment analysis with automatic template and verbalizer generation. We use SST-2 as an example.
# %% [markdown]
# ### 1. load dataset
# %%
# import argparse
# parser = argparse.ArgumentParser("")
# parser.add_argument("--lr", type=float, default=5e-5)
# args = parser.parse_args()
from openprompt.data_utils.text_classification_dataset import SST2Processor
dataset = {}
dataset['train'] = SST2Processor().get_train_examples("./datasets/TextClassification/SST-2/16-shot/16-13")
dataset['validation'] = SST2Processor().get_dev_examples("./datasets/TextClassification/SST-2/16-shot/16-13")
dataset['test'] = SST2Processor().get_test_examples("./datasets/TextClassification/SST-2/16-shot/16-13")
# %% [markdown]
# ### 2. build initial verbalizer and template
# - note that if you wish to do automatic label word generation, the verbalizer is not the final verbalizer, and is only used for template generation.
# - note that if you wish to do automatic template generation, the template text should include `{"meta":"labelword"}` so that the label word can be used, and remember to use the `LMBFFTemplateGenerationTemplate` class so that "labelword" is handled properly. Otherwise you can just use `ManualTemplate`
# - below is a template that expects plain text generation at each "mask" token position
# %%
print('load model...')
from openprompt.plms import load_plm
# load mlm model for main tasks
plm, tokenizer, model_config, WrapperClass = load_plm("roberta", "roberta-large")
# load generation model for template generation
template_generate_model, template_generate_tokenizer, template_generate_model_config, template_tokenizer_wrapper = load_plm('t5', 't5-large')
from openprompt.prompts import ManualVerbalizer, ManualTemplate
verbalizer = ManualVerbalizer(tokenizer=tokenizer, num_classes=2, label_words=[['terrible'],['great']])
from openprompt.prompts.prompt_generator import LMBFFTemplateGenerationTemplate
template = LMBFFTemplateGenerationTemplate(tokenizer=template_generate_tokenizer, verbalizer=verbalizer, text='{"placeholder":"text_a"} {"mask"} {"meta":"labelword"} {"mask"}.')
# template = ManualTemplate(tokenizer=tokenizer, text='{"placeholder":"text_a"} It is {"mask"}.')
# view wrapped example
wrapped_example = template.wrap_one_example(dataset['train'][0])
print(wrapped_example)
# %%
# parameter setting
cuda = True
auto_t = True # whether to perform automatic template generation
auto_v = True # whether to perform automatic label word generation
# %%
# train util function
from openprompt.plms import load_plm
from openprompt.prompts.prompt_generator import T5TemplateGenerator
from openprompt.pipeline_base import PromptDataLoader, PromptForClassification
from openprompt.prompts import ManualTemplate
from openprompt.trainer import ClassificationRunner
import copy
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
def fit(model, train_dataloader, val_dataloader, loss_func, optimizer):
best_score = 0.0
for epoch in range(10):
train_epoch(model, train_dataloader, loss_func, optimizer)
score = evaluate(model, val_dataloader)
if score > best_score:
best_score = score
return best_score
def train_epoch(model, train_dataloader, loss_func, optimizer):
model.train()
for step, inputs in enumerate(train_dataloader):
if cuda:
inputs = inputs.cuda()
logits = model(inputs)
labels = inputs['label']
loss = loss_func(logits, labels)
loss.backward()
optimizer.step()
optimizer.zero_grad()
def evaluate(model, val_dataloader):
model.eval()
allpreds = []
alllabels = []
with torch.no_grad():
for step, inputs in enumerate(val_dataloader):
if cuda:
inputs = inputs.cuda()
logits = model(inputs)
labels = inputs['label']
alllabels.extend(labels.cpu().tolist())
allpreds.extend(torch.argmax(logits, dim=-1).cpu().tolist())
acc = sum([int(i==j) for i,j in zip(allpreds, alllabels)])/len(allpreds)
return acc
# %% [markdown]
# ### 3. automatic template and verbalizer generation
# %%
from tqdm import tqdm
# template generation
if auto_t:
print('performing auto_t...')
if cuda:
template_generate_model = template_generate_model.cuda()
    template_generator = T5TemplateGenerator(template_generate_model, template_generate_tokenizer, template_tokenizer_wrapper, verbalizer, beam_width=5) # beam_width is set to 5 here for efficiency; to improve performance, try a larger number.
dataloader = PromptDataLoader(dataset['train'], template, template_generate_tokenizer, template_tokenizer_wrapper, batch_size=len(dataset['train']), decoder_max_length=128) # register all data at once
for data in dataloader:
if cuda:
data = data.cuda()
template_generator._register_buffer(data)
template_generate_model.eval()
print('generating...')
template_texts = template_generator._get_templates()
original_template = template.text
template_texts = [template_generator.convert_template(template_text, original_template) for template_text in template_texts]
# template_generator._show_template()
template_generator.release_memory()
# generate a number of candidate template text
print(template_texts)
# iterate over each candidate and select the best one
best_metrics = 0.0
best_template_text = None
for template_text in tqdm(template_texts):
template = ManualTemplate(tokenizer, template_text)
train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)
valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)
model = PromptForClassification(copy.deepcopy(plm), template, verbalizer)
loss_func = torch.nn.CrossEntropyLoss()
no_decay = ['bias', 'LayerNorm.weight']
        # it's always good practice to set no weight decay for bias and LayerNorm parameters
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)
if cuda:
model = model.cuda()
score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)
if score > best_metrics:
print('best score:', score)
print('template:', template_text)
best_metrics = score
best_template_text = template_text
# use the best template
template = ManualTemplate(tokenizer, text=best_template_text)
print(best_template_text)
# %%
# verbalizer generation
from openprompt.prompts.prompt_generator import RobertaVerbalizerGenerator
if auto_v:
print('performing auto_v...')
# load generation model for template generation
if cuda:
plm = plm.cuda()
verbalizer_generator = RobertaVerbalizerGenerator(model=plm, tokenizer=tokenizer, candidate_num=20, label_word_num_per_class=20)
    # to improve performance, try larger numbers
dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass, batch_size=32)
for data in dataloader:
if cuda:
data = data.cuda()
verbalizer_generator.register_buffer(data)
label_words_list = verbalizer_generator.generate()
verbalizer_generator.release_memory()
# iterate over each candidate and select the best one
current_verbalizer = copy.deepcopy(verbalizer)
best_metrics = 0.0
best_label_words = None
for label_words in tqdm(label_words_list):
current_verbalizer.label_words = label_words
train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)
valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)
model = PromptForClassification(copy.deepcopy(plm), template, current_verbalizer)
loss_func = torch.nn.CrossEntropyLoss()
no_decay = ['bias', 'LayerNorm.weight']
        # it's always good practice to set no weight decay for bias and LayerNorm parameters
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)
if cuda:
model = model.cuda()
score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)
if score > best_metrics:
best_metrics = score
best_label_words = label_words
# use the best verbalizer
print(best_label_words)
verbalizer = ManualVerbalizer(tokenizer, num_classes=2, label_words=best_label_words)
# %% [markdown]
# ### 4. main training loop
# %%
# main training loop
train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)
valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)
test_dataloader = PromptDataLoader(dataset['test'], template, tokenizer, WrapperClass)
model = PromptForClassification(copy.deepcopy(plm), template, verbalizer)
loss_func = torch.nn.CrossEntropyLoss()
no_decay = ['bias', 'LayerNorm.weight']
# it's always good practice to set no weight decay for bias and LayerNorm parameters
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)
if cuda:
model = model.cuda()
score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)
test_score = evaluate(model, test_dataloader)
print(test_score)
| [
"torch.no_grad",
"torch.argmax",
"torch.nn.CrossEntropyLoss"
] | 1.9.0 | creativeautomaton/OpenPrompt | bd9ea544ab144d94af32d245101ba35c9d5a5a65 |
1.10 | from utilities.utils import build_windowed_data
from utilities.utils import load_h5_df_train_test_dataset, get_data, cast_sleep_stages
from sleep_stage_config import *
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
class WindowedFrameAppleRAWDataLoader(torch.utils.data.Dataset):
def __init__(self, acc_data, hrv_data, target, idx, transform=None):
self.acc_data = torch.from_numpy(acc_data).float()
self.acc_data = self.acc_data.permute(0, 2, 1)
self.hrv_data = torch.from_numpy(hrv_data).float()
self.hrv_data = self.hrv_data.permute(0, 2, 1) # set it to batch_num, channel, time_dim
self.idx = torch.from_numpy(idx)
self.target = torch.from_numpy(target).long()
self.transform = transform
def __getitem__(self, index):
hrv_x = self.hrv_data[index]
acc_x = self.acc_data[index]
y = self.target[index]
i = self.idx[index]
return acc_x, hrv_x, y, i
def __len__(self):
return len(self.target)
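# item layout sketch (shapes read off the constructor above, lengths assumed):
# acc_x: (3, 30*(seq_len+1)) channel-first accelerometer window
# hrv_x: (n_hrv_features, window_len) channel-first HRV feature window
# y: sleep-stage label, i: index of the window within the full dataset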
class WindowedFrameMESARAWDataLoader(torch.utils.data.Dataset):
def __init__(self, data, target, idx, transform=None):
self.data = torch.from_numpy(data).float()
self.data = self.data.permute(0, 2, 1) # set it to batch_num, channel, time_dim
self.idx = torch.from_numpy(idx)
self.target = torch.from_numpy(target).long()
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
i = self.idx[index]
if self.transform:
x = self.transform(x)
return x, y, i
def __len__(self):
return len(self.data)
def get_raw_dataloader_by_id(pid, cfg: Config, shuffle, batch_size, data_set, seq_len, apple_acc_hz=1):
import h5py as h5py
if data_set == "apple_raw":
pid_raw_acc_path = os.path.join(cfg.APPLE_CROPPED_RAW_PATH, f"{str(pid)}_cleaned_resampled_"
f"{str(apple_acc_hz)}_hz.out")
raw_acc = pd.read_csv(pid_raw_acc_path, delimiter=' ', header=None).values
raw_acc = raw_acc[:raw_acc.shape[0]-30, 1:]
outputs = []
for i in np.arange(3):
sig = raw_acc[:, i].reshape(-1, 30) # e.g. 200 x 30
out = build_windowed_data(sig=sig, sampling_rate=1, epoch_len=30, win_len=seq_len+1)
assert out.shape == (sig.shape[0], 30*(seq_len+1))
outputs.append(np.expand_dims(out, -1))
raw_acc_x = np.concatenate(outputs, axis=-1)
cache_path = cfg.APPLE_LOOCV_ALL_WINDOWED % seq_len
with h5py.File(cache_path, 'r') as data:
df_data = data["df_values"][:]
x = data["x"][:]
y = data["y"][:]
columns = data["columns"][:].astype(str).tolist()
data.close()
df = pd.DataFrame(df_data, columns=columns)
pid_idx = df[df.pid == pid]['window_idx'].values.astype(int)
x_hrv = x[pid_idx, :, :][:, :, 1:] # remove the activity counts only keep the hrv features
y_stage = y[pid_idx]
data_ds = WindowedFrameAppleRAWDataLoader(raw_acc_x, x_hrv, y_stage, pid_idx)
data_loader = DataLoader(
data_ds,
batch_size=batch_size,
shuffle=shuffle,
num_workers=0,
pin_memory=torch.cuda.is_available()
)
return data_loader
def get_raw_test_df(pid, cfg: Config, dataset, num_classes, seq_len):
import h5py as h5py
if dataset == "apple_raw":
with h5py.File(cfg.APPLE_LOOCV_ALL_WINDOWED % seq_len, 'r') as data:
df_value = data["df_values"][:]
df_columns = data['columns'][:].astype(str).tolist()
data.close()
df_test = pd.DataFrame(df_value, columns=df_columns)
df_test = df_test[df_test['pid'] == pid].copy(deep=True)
return df_test
| [
"torch.from_numpy",
"torch.cuda.is_available"
] | 1.10.2 | bzhai/Ubi-SleepNet | 27837827dec608d06659421d073872fb1f68453e |
1.1 | import torch
import torch.nn as nn
import spconv
from spconv.modules import SparseModule
from collections import OrderedDict
class ResidualBlock(SparseModule):
def __init__(self, in_channels, out_channels, norm_fn, indice_key=None):
super().__init__()
if in_channels == out_channels:
self.i_branch = spconv.SparseSequential(
nn.Identity()
)
else:
self.i_branch = spconv.SparseSequential(
spconv.SubMConv3d(in_channels, out_channels, kernel_size=1, bias=False)
)
self.conv_branch = spconv.SparseSequential(
norm_fn(in_channels),
nn.ReLU(),
spconv.SubMConv3d(in_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key),
norm_fn(out_channels),
nn.ReLU(),
spconv.SubMConv3d(out_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key)
)
def forward(self, input):
identity = spconv.SparseConvTensor(input.features, input.indices, input.spatial_shape, input.batch_size)
output = self.conv_branch(input)
output.features += self.i_branch(identity).features
return output
class VGGBlock(SparseModule):
def __init__(self, in_channels, out_channels, norm_fn, indice_key=None):
super().__init__()
self.conv_layers = spconv.SparseSequential(
norm_fn(in_channels),
nn.ReLU(),
spconv.SubMConv3d(in_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key)
)
def forward(self, input):
return self.conv_layers(input)
class UBlock(nn.Module):
def __init__(self, nPlanes, norm_fn, block_reps, block, indice_key_id=1):
super().__init__()
self.nPlanes = nPlanes
blocks = {'block{}'.format(i): block(nPlanes[0], nPlanes[0], norm_fn, indice_key='subm{}'.format(indice_key_id)) for i in range(block_reps)}
blocks = OrderedDict(blocks)
self.blocks = spconv.SparseSequential(blocks)
if len(nPlanes) > 1:
self.conv = spconv.SparseSequential(
norm_fn(nPlanes[0]),
nn.ReLU(),
spconv.SparseConv3d(nPlanes[0], nPlanes[1], kernel_size=2, stride=2, bias=False, indice_key='spconv{}'.format(indice_key_id))
)
self.u = UBlock(nPlanes[1:], norm_fn, block_reps, block, indice_key_id=indice_key_id+1)
self.deconv = spconv.SparseSequential(
norm_fn(nPlanes[1]),
nn.ReLU(),
spconv.SparseInverseConv3d(nPlanes[1], nPlanes[0], kernel_size=2, bias=False, indice_key='spconv{}'.format(indice_key_id))
)
blocks_tail = {}
for i in range(block_reps):
blocks_tail['block{}'.format(i)] = block(nPlanes[0] * (2 - i), nPlanes[0], norm_fn, indice_key='subm{}'.format(indice_key_id))
blocks_tail = OrderedDict(blocks_tail)
self.blocks_tail = spconv.SparseSequential(blocks_tail)
def forward(self, input):
output = self.blocks(input)
identity = spconv.SparseConvTensor(output.features, output.indices, output.spatial_shape, output.batch_size)
if len(self.nPlanes) > 1:
output_decoder = self.conv(output)
output_decoder = self.u(output_decoder)
output_decoder = self.deconv(output_decoder)
output.features = torch.cat((identity.features, output_decoder.features), dim=1)
output = self.blocks_tail(output)
return output
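# illustrative usage sketch (norm_fn and plane sizes are assumptions, not taken from this repo):
# norm_fn = functools.partial(nn.BatchNorm1d, eps=1e-4, momentum=0.1)
# unet = UBlock([32, 64, 128], norm_fn, block_reps=2, block=ResidualBlock)
# `unet` maps a spconv.SparseConvTensor with 32-channel features back to 32-channel features,
# recursing one level deeper (and halving spatial resolution) per extra entry in nPlanes.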
| [
"torch.nn.ReLU",
"torch.cat",
"torch.nn.Identity"
] | 1.1 | thangvubk/SphereRPN | 9b154256774437bb23d81e22990d350555d39b81 |
1.7 | import os
import torch
from collections import OrderedDict
import glob
class Saver(object):
def __init__(self, args):
self.args = args
self.directory = os.path.join('run', args.train_dataset, args.checkname)
self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*')))
run_id = int(self.runs[-1].split('_')[-1]) + 1 if self.runs else 0
self.experiment_dir = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)))
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
def save_checkpoint(self, state, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
filename = os.path.join(self.experiment_dir, filename)
torch.save(state, filename)
def save_experiment_config(self):
logfile = os.path.join(self.experiment_dir, 'parameters.txt')
log_file = open(logfile, 'w')
p = OrderedDict()
p['train_dataset'] = self.args.train_dataset
p['lr'] = self.args.lr
p['epoch'] = self.args.epochs
for key, val in p.items():
log_file.write(key + ':' + str(val) + '\n')
log_file.close()
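# illustrative usage sketch (the fields on `args` are assumptions based on __init__ above):
# saver = Saver(args)  # args needs train_dataset, checkname, lr, epochs
# saver.save_experiment_config()
# saver.save_checkpoint({'epoch': epoch, 'state_dict': model.state_dict()})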
| [
"torch.save"
] | 1.7.0 | dumpmemory/Transformer-Explainability | 951e112d24c1a642ceefeb0dd03a607040305383 |
1.4 | # ---------------------------------------------------------------
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for OSCAR. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
from isaacgym import gymapi
import torch
from .base_controller import Controller
class JointTorqueController(Controller):
"""
Joint Torque Controller.
This controller expects D-DOF commands either in delta form (dq1, dq2, ..., dqD), or absolute form
(q1, q2, ..., qD), as specified by the @use_delta argument.
Args:
input_min (int, float, or array): Minimum values below which received commands will be clipped
input_max (int, float, or array): Maximum values above which received commands will be clipped
output_min (int, float, or array): Lower end of range that received commands will be mapped to
output_max (int, float, or array): Upper end of range that received commands will be mapped to
control_min (int, float, or array): Minimum control values below which outputted controls will be clipped
control_max (int, float, or array): Maximum control values above which outputted controls will be clipped
control_noise (float): Amount of noise to apply. Should be in [0, 1)
control_dim (int): Outputted control dimension -- should be number of joints from base to eef body frame
device (str): Which device to send all tensors to by default
use_delta (bool): Whether to expect received commands to be delta or absolute joint positions
normalize_control (bool): Whether or not to normalize outputted controls to (-1, 1) range
"""
def __init__(
self,
input_min,
input_max,
output_min,
output_max,
control_min,
control_max,
control_noise,
control_dim,
device,
use_delta=True,
normalize_control=True,
**kwargs, # hacky way to sink extraneous args
):
# Run super init first
super().__init__(
command_dim=control_dim,
input_min=input_min,
input_max=input_max,
output_min=output_min,
output_max=output_max,
control_min=control_min,
control_max=control_max,
control_noise=control_noise,
control_dim=control_dim,
device=device,
normalize_control=normalize_control,
)
# Store internal vars
self.use_delta = use_delta
# Initialize internal vars
self.n_envs = None
self.goal_torque = None
def update_goal(self, control_dict, command, env_ids=None, train=False):
"""
Updates the internal goal (absolute joint torques) based on the inputted joint command
NOTE: received joints from @control_dict can be greater than control_dim; we assume the first control_dim
indexes correspond to the relevant elements to be used for joint torque goal setting
Args:
control_dict (dict): Dictionary of keyword-mapped tensors including relevant control
information (eef state, q states, etc.)
Expected keys:
eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body
command (tensor): D-DOF joint torque command -- should be (dq1, dq2, ..., dqD), or absolute form
(q1, q2, ..., qD) if self.use_delta is False.
env_ids (None or tensor): If specified, should be (integer) IDs corresponding to the
specific env instances of this robot that should be reset
train (bool): If True, will assume env_ids is None and will NOT index specific goals so we avoid inplace
operations and so that we can backprop later
"""
# Scale the commands appropriately
cmd = self.scale_command(command)
# Set n_envs, goal_pos, and goal_ori if we haven't done so already
if self.n_envs is None:
self.n_envs = command.shape[0]
self.goal_torque = torch.zeros(self.n_envs, self.control_dim, device=self.device)
# If we're training, make sure env_ids is None
if train:
assert env_ids is None or len(env_ids) == self.n_envs, "When in training mode, env_ids must be None or len of n_envs!"
# Directly set goals
self.goal_torque = self.goal_torque + cmd if self.use_delta else cmd
else:
# If env_ids is None, we update all the envs
if env_ids is None:
env_ids = torch.arange(start=0, end=self.n_envs, device=self.device, dtype=torch.long)  # long dtype so env_ids can be used for tensor indexing
# Update goal
self.goal_torque[env_ids] = self.goal_torque[env_ids] + cmd[env_ids] if self.use_delta else cmd[env_ids]
def compute_control(self, control_dict):
"""
Computes low-level joint torque controls.
Since we are directly using joint-torque control, this simply is equivalent to returning the
internal goal state
Args:
control_dict (dict): Dictionary of state tensors including relevant info for controller computation
Expected keys:
eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body
Returns:
tensor: Processed low-level joint torque control actions
"""
# Post-process internal goal (clipping + normalization)
u = self.postprocess_control(self.goal_torque)
# Return the control joint positions
return u
def reset(self, control_dict, env_ids=None):
"""
Reset the internal vars associated with this controller
Args:
control_dict (dict): Dictionary of state tensors including relevant info for controller computation
Expected keys:
eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body
env_ids (None or tensor): If specified, should be (integer) IDs corresponding to the
specific env instances of this policy that should be reset
"""
# Clear n_envs, goal if we're now controlling a new set of envs
n_cmds = control_dict["eef_state"].shape[0]
if self.n_envs != n_cmds:
self.n_envs = None
self.goal_torque = None
# Reset corresponding envs to current positions
cmd = torch.zeros(n_cmds, self.command_dim, device=self.device)
self.update_goal(
control_dict=control_dict,
command=cmd,
env_ids=env_ids
)
def get_flattened_goals(self):
"""
Returns the current goal command in a serialized 2D form
Returns:
torch.tensor: (N, -1) current goals in this controller
"""
return self.goal_torque
@property
def goal_dim(self):
# This is the same as the control dimension
return self.control_dim
@property
def control_type(self):
# This controller outputs joint efforts (torques)
return gymapi.DOF_MODE_EFFORT
@property
def differentiable(self):
# We can backprop through all computations
return True
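# scaling sketch (illustrative; the actual mapping lives in the base Controller.scale_command):
# a received command is clipped to [input_min, input_max] and then affinely mapped to
# [output_min, output_max], e.g. a policy action in [-1, 1] becoming a delta torque in [-5, 5],
# before postprocess_control clips (and optionally normalizes) it to [control_min, control_max].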
| [
"torch.zeros",
"torch.arange"
] | 1.4.0 | NVlabs/oscar | df778a4173a118f10627cb2ef4021c26303231fc |
1.0 | import os
import subprocess
import sys
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output(
[cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
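# example: `nvcc -V` prints a line like "Cuda compilation tools, release 11.1, V11.1.105",
# from which this helper extracts bare_metal_major="11" and bare_metal_minor="1"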
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(
cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
"In some cases, a minor-version mismatch will not cause later errors: " +
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk).")
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()]
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print('\nWarning: Torch did not find available GPUs on this system.\n',
'If your intention is to cross-compile, this is not an error.\n'
'By default, Colossal-AI will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
'Volta (compute capability 7.0), Turing (compute capability 7.5),\n'
'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
'If you wish to cross-compile for a single specific architecture,\n'
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError("Colossal-AI requires Pytorch 0.4 or newer.\n" +
"The latest stable release can be obtained from https://pytorch.org/")
cmdclass = {}
ext_modules = []
# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
if "--cuda_ext" in sys.argv:
if TORCH_MAJOR == 0:
raise RuntimeError("--cuda_ext requires Pytorch 1.0 or later, "
"found torch.__version__ = {}".format(torch.__version__))
sys.argv.remove("--cuda_ext")
if CUDA_HOME is None:
raise RuntimeError(
"--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
else:
check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
ext_modules.append(
CUDAExtension(name='colossal_C',
sources=['csrc/colossal_C_frontend.cpp',
'csrc/multi_tensor_sgd_kernel.cu',
'csrc/multi_tensor_scale_kernel.cu',
'csrc/multi_tensor_adam.cu',
'csrc/multi_tensor_l2norm_kernel.cu',
'csrc/multi_tensor_lamb.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc': ['-lineinfo',
'-O3',
# '--resource-usage',
'--use_fast_math'] + version_dependent_macros}))
install_requires = fetch_requirements('requirements/requirements.txt')
setup(
name='colossalai',
version='0.0.1-beta',
packages=find_packages(exclude=('csrc',
'tests',
'docs',
'tests',
'*.egg-info',)),
description='An integrated large-scale model training system with efficient parallelization techniques',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension} if ext_modules else {},
install_requires=install_requires,
)
| [
"torch.cuda.is_available",
"torch.utils.cpp_extension.CUDAExtension",
"torch.__version__.split",
"torch.version.cuda.split"
] | 1.0 | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 |
1.0 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import numbers
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch import Tensor
from torch.nn.parameter import Parameter
from typing import Tuple
import importlib
from colossalai.context import seed, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import LAYERS
from colossalai.utils import get_current_device
from ._operation import FusedLayerNormAffineFunction1D
from .._common_utils import divide, set_tensor_parallel_attribute_by_partition
from .._parallel_utilities import reduce_grad, reduce_input, gather_forward_split_backward, \
split_forward_gather_backward
from ..base_layer import ParallelLayer
@LAYERS.register_module
class Linear1D_Col(ParallelLayer):
"""Linear layer with column parallelism.
The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
its second dimension as :math:`A = [A_1, ..., A_p]`.
:param in_features: first dimension of matrix A.
:type in_features: int
:param output_size: second dimension of matrix A.
:type output_size: int
:param bias: If true, add bias, defaults to True
:type bias: bool, optional
:param dtype: The dtype of parameters, defaults to None
:type dtype: torch.dtype, optional
:param gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is :math:`Y_i = XA_i`, defaults to False
:type gather_output: bool, optional
"""
def __init__(self,
in_features: int,
output_size: int,
bias: bool = True,
dtype: torch.dtype = None,
gather_output: bool = False,
skip_bias_add: bool = False,
init_weight='torch',
init_bias='torch'
):
super().__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
if skip_bias_add and not bias:
raise ValueError('cannot skip bias addition if bias is None')
self.output_size_per_partition = divide(output_size, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(
self.output_size_per_partition, self.in_features,
**factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(
self.output_size_per_partition,
**factory_kwargs))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
with seed(ParallelMode.TENSOR):
self.reset_parameters(init_weight, init_bias)
self._set_tensor_parallel_attributes()
def reset_parameters(self, init_weight, init_bias) -> None:
assert init_weight in ('torch', 'jax', 'zero')
assert init_bias in ('torch', 'jax', 'zero')
# setting
fan_in, fan_out = self.in_features, self.out_features
# init weight
if init_weight == 'torch':
a = math.sqrt(5)
nonlinearity = 'leaky_relu'
std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in)
bound = math.sqrt(3.0) * std
init.uniform_(self.weight, -bound, bound)
elif init_weight == 'jax':
std = math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std
init.uniform_(self.weight, -a, a)
elif init_weight == 'zero':
init.zeros_(self.weight)
# init bias
if self.bias is not None:
if init_bias == 'torch':
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
elif init_bias == 'jax':
init.normal_(self.bias, std=1e-6)
elif init_bias == 'zero':
init.zeros_(self.bias)
def _set_tensor_parallel_attributes(self):
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
if self.bias is not None:
set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
# Set up backprop all-reduce.
input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
# Matrix multiply.
bias = self.bias if not self.skip_bias_add else None
output_parallel = F.linear(input_parallel, self.weight, bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_forward_split_backward(
output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
else:
output = output_parallel
if self.skip_bias_add:
return output, self.bias
else:
return output
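# shape sketch for column parallelism (2-way tensor parallel assumed for illustration):
# X: (N, in_features), full weight A viewed as [A_1, A_2] with A_i: (in_features, output_size/2);
# each rank computes Y_i = X A_i (+ b_i), and gather_output=True all-gathers the Y_i along the
# last dimension to recover Y: (N, output_size). skip_bias_add returns the bias separately instead.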
@LAYERS.register_module
class Linear1D_Row(ParallelLayer):
""" Linear layer with row parallelism
:param in_features: size of each input sample
:type in_features: int
:param out_features: size of each output sample
:type out_features: int
:param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True
:type bias: bool, optional
:param dtype: The dtype of parameters, defaults to None
:type dtype: torch.dtype, optional
:param parallel_input: If set to ``True``, it's assumed that the input is already split, defaults to False
:type parallel_input: bool, optional
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = None,
parallel_input: bool = False,
skip_bias_add: bool = False,
init_weight='torch',
init_bias='torch'
):
super().__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.parallel_input = parallel_input
self.skip_bias_add = skip_bias_add
if skip_bias_add and not bias:
raise ValueError('cannot skip bias addition if bias is None')
# Divide the weight matrix along the last dimension.
self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
# Parameters.
# Initialize weight.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(
self.out_features,
self.input_size_per_partition,
**factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(
self.out_features,
**factory_kwargs
))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
with seed(ParallelMode.TENSOR):
self.reset_parameters(init_weight, init_bias)
self._set_tensor_parallel_attributes()
def reset_parameters(self, init_weight, init_bias) -> None:
assert init_weight in ('torch', 'jax', 'zero')
assert init_bias in ('torch', 'jax', 'zero')
# setting
fan_in, fan_out = self.in_features, self.out_features
# init weight
if init_weight == 'torch':
a = math.sqrt(5)
nonlinearity = 'leaky_relu'
std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in)
bound = math.sqrt(3.0) * std
init.uniform_(self.weight, -bound, bound)
elif init_weight == 'jax':
std = math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std
init.uniform_(self.weight, -a, a)
elif init_weight == 'zero':
init.zeros_(self.weight)
# init bias
if self.bias is not None:
if init_bias == 'torch':
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
elif init_bias == 'jax':
init.normal_(self.bias, std=1e-6)
elif init_bias == 'zero':
init.zeros_(self.bias)
dist.broadcast(self.bias,
src=gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0],
group=gpc.get_group(ParallelMode.PARALLEL_1D))
def _set_tensor_parallel_attributes(self):
num_partition = gpc.get_world_size(ParallelMode.TENSOR)
set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
def forward(self, input_: Tensor) -> Tensor:
# Set up backprop all-reduce.
if self.parallel_input:
input_ = input_
else:
input_ = split_forward_gather_backward(
input_, ParallelMode.PARALLEL_1D, dim=-1)
output_parallel = F.linear(input_, self.weight)
output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
if not self.skip_bias_add:
output = output + self.bias
return output
else:
return output, self.bias
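# shape sketch for row parallelism (2-way tensor parallel assumed for illustration):
# the input is split along its last dimension, each rank holds A_i: (in_features/2, out_features)
# and computes a partial X_i A_i; reduce_input all-reduces (sums) the partials across ranks
# before the replicated bias is added.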
@LAYERS.register_module
class MixedFusedLayerNorm1D(torch.nn.Module):
def __init__(self, normalized_shape, eps=1e-5):
super(MixedFusedLayerNorm1D, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
self.reset_parameters()
def reset_parameters(self):
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
return FusedLayerNormAffineFunction1D.apply(
input, self.weight, self.bias, self.normalized_shape, self.eps)
| [
"torch.Size",
"torch.no_grad",
"torch.nn.init.ones_",
"torch.nn.functional.linear",
"torch.nn.init.normal_",
"torch.nn.init.calculate_gain",
"torch.nn.init.uniform_",
"torch.nn.init.zeros_",
"torch.empty",
"torch.Tensor"
] | 1.0 | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 |
1.0 | import torch
def get_test_devices():
"""Creates a string list with the devices type to test the source code.
CUDA devices will be test only in case the current hardware supports it.
Return:
list(str): list with devices names.
"""
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
return devices
# setup the devices to test the source code
TEST_DEVICES = get_test_devices()
| [
"torch.cuda.is_available"
] | 1.0.0 | kajal-puri/torchgeometry | 36c4992d5f741a65a1f558266c588e37c24462da |
1.6 | from typing import Optional
from typing import Type
import torch
from scipy.sparse import coo_matrix
from .indexing import SizeType
from .indexing import unroll_index
def torch_coo_to_scipy_coo(m: torch.sparse.FloatTensor) -> coo_matrix:
"""Convert torch :class:`torch.sparse.FloatTensor` tensor to.
:class:`scipy.sparse.coo_matrix`
"""
data = m.values().numpy()
indices = m.indices()
return coo_matrix((data, (indices[0], indices[1])), tuple(m.size()))
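# usage sketch:
# i = torch.tensor([[0, 1], [1, 0]]); v = torch.tensor([1.0, 2.0])
# m = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce()
# torch_coo_to_scipy_coo(m)  # -> 2x2 scipy.sparse.coo_matrix with entries at (0, 1) and (1, 0)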
def scatter_indices(indices: torch.LongTensor, shape: SizeType):
"""Unroll the coo indices using the provided shape.
.. code-block::
indices = torch.tensor([
[0, 1, 2],
[2, 3, 4],
[4, 5, 4]
])
shape = (3, 2)
print(scatter_indices(indices, shape))
# tensor([[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
# 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
# [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4,
# 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4],
# [4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4,
# 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4],
# [0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1,
# 0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1]])
:param indices:
:param shape:
:return:
"""
if not shape:
return indices
idx = torch.stack(unroll_index(shape))
a_repeat = [1] * indices.ndim
a_repeat[-1] = idx.shape[-1]
b_repeat = [1] * indices.ndim
b_repeat[-1] = indices.shape[-1]
a = torch.repeat_interleave(indices, idx.shape[-1], dim=1)
b = idx.repeat(b_repeat)
return torch.cat((a, b))
def _expand_idx(idx):
if idx.ndim == 1:
idx = idx.unsqueeze(0)
idx = torch.cat((torch.zeros_like(idx), idx))
return idx
def _coo_tensor(
indices: torch.LongTensor,
source: torch.Tensor,
size: Optional[SizeType] = None,
dtype: Optional[Type] = None,
**kwargs
):
kwargs["dtype"] = source.dtype if dtype is None else dtype
if size is not None:
kwargs["size"] = size
return torch.sparse_coo_tensor(indices, source, **kwargs)
# TODO: infer size from index sizes
def scatter_coo(
indices: torch.LongTensor,
source: torch.FloatTensor,
size: Optional[SizeType] = None,
expand: bool = False,
dtype: Optional[Type] = None,
) -> torch.sparse.FloatTensor:
"""Scatter the provided source tensor to the provided indices.
:param indices:
:param source:
:return:
"""
indices = _expand_idx(indices)
if not torch.is_tensor(source):
source = torch.tensor(source)
if expand:
shape = source.shape
# r = prod(shape[:-1]) * indices.shape[1]
r = indices.shape[1]
flattened = source.view(-1).repeat(r)
else:
shape = source.shape[1:]
flattened = source.view(-1)
if size is not None and size[-1] is ...:
if not len(size) - 1 == indices.shape[0]:
raise ValueError(
"Provided dims ({}) must match number of index dims ({})".format(
len(size) - 1, indices.shape[0]
)
)
size = tuple(list(size)[:-1]) + shape
sidx = scatter_indices(indices, shape)
return _coo_tensor(sidx, flattened, size=size, dtype=dtype)
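# usage sketch (values chosen for illustration):
# idx = torch.tensor([0, 2, 4])           # 1-D indices are expanded with a leading row of zeros
# src = torch.tensor([1.0, 2.0, 3.0])
# scatter_coo(idx, src, size=(1, 5))      # sparse (1, 5) tensor with 1., 2., 3. at columns 0, 2, 4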
#
# def scatter_coo_fill(
# indices: torch.LongTensor,
# source: torch.FloatTensor,
# size: Optional[SizeType] = None,
# dtype: Optional[Type] = None,
# ) -> torch.sparse.FloatTensor:
# """Fill sparse coo matrix with the provided tensor at the provided indices.
#
# :param indices:
# :param source:
# :return:
# """
# indices = _expand_idx(indices)
# source = torch.tensor(source)
# sidx = scatter_indices(indices, source.shape)
# if size is not None and size[-1] is ...:
# size = tuple(list(size)[:-1])
# if torch.is_tensor():
# size += source.shape
# return _coo_tensor(
# sidx, source.view(-1).repeat(indices.shape[1]), size=size, dtype=dtype
# )
| [
"torch.cat",
"torch.is_tensor",
"torch.repeat_interleave",
"torch.tensor",
"torch.sparse_coo_tensor",
"torch.zeros_like"
] | 1.6.0 | jvrana/caldera | a346324e77f20739e00a82f97530dda4906f59dd |
1.6 | """test_train_networks.py.
Instructions for creating a new test case.
loader, getter, network
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Type
import networkx as nx
import numpy as np
import pytest
import torch
from torch import optim
from caldera.blocks import AggregatingEdgeBlock
from caldera.blocks import AggregatingGlobalBlock
from caldera.blocks import AggregatingNodeBlock
from caldera.blocks import Aggregator
from caldera.blocks import EdgeBlock
from caldera.blocks import Flex
from caldera.blocks import GlobalBlock
from caldera.blocks import MLP
from caldera.blocks import MultiAggregator
from caldera.blocks import NodeBlock
from caldera.data import GraphBatch
from caldera.data import GraphData
from caldera.data import GraphDataLoader
from caldera.models import GraphCore
from caldera.models import GraphEncoder
from caldera.utils import deterministic_seed
from caldera.utils.nx import nx_iter_roots
from caldera.utils.tensor import to_one_hot
SEED = 0
class NamedNetwork:
def __init__(self, name, network_func):
self.name = name
self.f = network_func
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
class Networks:
"""Networks that will be used in the tests."""
n = NamedNetwork
linear_block = n(
"linear",
lambda: torch.nn.Sequential(
torch.nn.Linear(5, 16), torch.nn.ReLU(), torch.nn.Linear(16, 1)
),
)
mlp_block = n(
"mlp",
lambda: torch.nn.Sequential(
Flex(MLP)(Flex.d(), 16), Flex(torch.nn.Linear)(Flex.d(), 1)
),
)
node_block = n(
"node_block",
lambda: torch.nn.Sequential(
NodeBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
)
edge_block = n(
"edge_block",
lambda: torch.nn.Sequential(
EdgeBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
)
global_block = n(
"global_block",
lambda: torch.nn.Sequential(
GlobalBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
)
graph_encoder = n(
"graph_encoder",
lambda: GraphEncoder(
EdgeBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
)
),
NodeBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
)
),
GlobalBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
)
),
),
)
def create_graph_core(pass_global_to_edge: bool, pass_global_to_node: bool):
return GraphCore(
AggregatingEdgeBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
)
),
AggregatingNodeBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
edge_aggregator=Aggregator("add"),
),
AggregatingGlobalBlock(
torch.nn.Sequential(
Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
edge_aggregator=Aggregator("add"),
node_aggregator=Aggregator("add"),
),
pass_global_to_edge=pass_global_to_edge,
pass_global_to_node=pass_global_to_node,
)
graph_core = n("graph_core", create_graph_core)
def create_graph_core_multi_agg(
pass_global_to_edge: bool, pass_global_to_node: bool
):
agg = lambda: Flex(MultiAggregator)(Flex.d(), ["add", "mean", "max", "min"])
return GraphCore(
AggregatingEdgeBlock(
torch.nn.Sequential(
Flex(MLP)(
Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU
),
Flex(torch.nn.Linear)(Flex.d(), 1),
)
),
AggregatingNodeBlock(
torch.nn.Sequential(
Flex(MLP)(
Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU
),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
edge_aggregator=agg(),
),
AggregatingGlobalBlock(
torch.nn.Sequential(
Flex(MLP)(
Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU
),
Flex(torch.nn.Linear)(Flex.d(), 1),
),
edge_aggregator=agg(),
node_aggregator=agg(),
),
pass_global_to_edge=pass_global_to_edge,
pass_global_to_node=pass_global_to_node,
)
graph_core_multi_agg = n("graph_core(multiagg)", create_graph_core_multi_agg)
@staticmethod
def reset(net: torch.nn.Module):
def weight_reset(model):
for layer in model.children():
if hasattr(layer, "reset_parameters"):
layer.reset_parameters()
net.apply(weight_reset)
class DataModifier:
"""Methods to modify data before training."""
def __init__(self, datalist):
self.datalist = datalist
@staticmethod
def node_sum(batch: GraphBatch, copy=True):
if copy:
batch = batch.copy()
batch.x = torch.cat([batch.x, batch.x.sum(axis=1, keepdim=True)], axis=1)
return batch
@staticmethod
def edge_sum(batch: GraphBatch, copy=True):
if copy:
batch = batch.copy()
batch.e = torch.cat([batch.e, batch.e.sum(axis=1, keepdim=True)], axis=1)
return batch
@staticmethod
def global_sum(batch: GraphBatch, copy=True):
if copy:
batch = batch.copy()
batch.g = torch.cat([batch.g, batch.g.sum(axis=1, keepdim=True)], axis=1)
return batch
def apply(self, f, *args, **kwargs):
f = self.resolve(f)
return [f(_d, *args, **kwargs) for _d in self.datalist]
@classmethod
def resolve(cls, f):
cls.valid(f)
if isinstance(f, str):
f = getattr(cls, f)
return f
@classmethod
def valid(self, f):
if callable(f):
return True
elif isinstance(f, str) and hasattr(self, f):
return True
return False
class DataLoaders:
"""Data loaders for test."""
@staticmethod
def random_loader(data_size, batch_size):
datalist = [GraphData.random(5, 5, 5) for _ in range(data_size)]
return GraphDataLoader(datalist, batch_size=batch_size)
@staticmethod
def _default_g(g: nx.DiGraph, global_key: str = None):
for _, data in g.nodes(data=True):
data["features"] = np.zeros((1,))
data["target"] = np.zeros((1,))
for _, _, data in g.edges(data=True):
data["features"] = np.zeros((1,))
data["target"] = np.zeros((1,))
g.set_global({"features": np.zeros((1,)), "target": np.zeros((1,))}, global_key)
return g
@classmethod
def random_graph_red_black_nodes(cls, data_size, batch_size):
input_data = []
output_data = []
s = 2
for _ in range(data_size):
g = nx.to_directed(nx.random_tree(10))
cls._default_g(g)
for n, ndata in g.nodes(data=True):
i = np.random.randint(0, 1, (1,))
ndata["features"] = to_one_hot(i, s)
if i % 2 == 0:
target = np.array([0.5])
else:
target = np.zeros(1)
ndata["target"] = target
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def random_graph_red_black_edges(cls, data_size, batch_size):
input_data = []
output_data = []
s = 2
for _ in range(data_size):
g = nx.to_directed(nx.random_tree(10))
cls._default_g(g)
for _, _, edata in g.edges(data=True):
i = np.random.randint(0, 1, (1,))
edata["features"] = to_one_hot(i, s)
if i % 2 == 0:
target = np.array([0.5])
else:
target = np.zeros((1,))
edata["target"] = target
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def random_graph_red_black_global(cls, data_size, batch_size):
input_data = []
output_data = []
s = 2
for _ in range(data_size):
g = nx.to_directed(nx.random_tree(10))
cls._default_g(g)
gdata = g.get_global()
i = np.random.randint(0, 1, (1,))
gdata["features"] = to_one_hot(i, s)
if i % 2 == 0:
target = np.array([0.5])
else:
target = np.zeros((1,))
gdata["target"] = target
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def est_density(cls, data_size, batch_size):
input_data = []
output_data = []
s = 2
for _ in range(data_size):
n_size = np.random.randint(2, 20)
g = nx.to_directed(nx.random_tree(n_size))
cls._default_g(g)
gdata = g.get_global()
gdata["features"] = np.random.randn(1)
gdata["target"] = np.array([nx.density(g)])
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def in_degree(cls, data_size, batch_size):
input_data = []
output_data = []
s = 2
for _ in range(data_size):
n_size = np.random.randint(2, 20)
g = nx.to_directed(nx.random_tree(n_size))
cls._default_g(g)
for n, ndata in g.nodes(data=True):
ndata["features"] = np.random.randn(1)
ind = g.in_degree(n)
ndata["target"] = np.array([ind])
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def boolean_network(cls, data_size, batch_size):
input_data = []
output_data = []
for _ in range(data_size):
n_size = np.random.randint(2, 20)
tree = nx.random_tree(n_size)
# randomize node directions
g = nx.DiGraph()
for n1, n2, edata in tree.edges(data=True):
i = np.random.randint(2)
if i % 2 == 0:
g.add_edge(n1, n2)
else:
g.add_edge(n2, n1)
cls._default_g(g)
for n in nx_iter_roots(g):
ndata = g.nodes[n]
ndata["target"] = np.array([1.0])
for n in nx.topological_sort(g):
ndata = g.nodes[n]
if "target" not in ndata:
incoming = []
for p in g.predecessors(n):
pdata = g.nodes[p]
incoming.append(pdata["target"])
incoming = np.concatenate(incoming)
i = incoming.max()
if i == 1:
o = np.array([0.0])
else:
o = np.array([1.0])
ndata["target"] = o
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
@classmethod
def sigmoid_circuit(cls, data_size, batch_size):
import math
def func(x):
return 1 - 1.0 / (1 + math.exp(-x))
input_data = []
output_data = []
for _ in range(data_size):
n_size = np.random.randint(2, 20)
tree = nx.random_tree(n_size)
# randomize node directions
g = nx.DiGraph()
for n1, n2, edata in tree.edges(data=True):
i = np.random.randint(2)
if i % 2 == 0:
g.add_edge(n1, n2)
else:
g.add_edge(n2, n1)
cls._default_g(g)
for n in nx_iter_roots(g):
ndata = g.nodes[n]
ndata["target"] = np.array([3.0])
for n in nx.topological_sort(g):
ndata = g.nodes[n]
if "target" not in ndata:
incoming = []
for p in g.predecessors(n):
pdata = g.nodes[p]
incoming.append(pdata["target"])
incoming = np.concatenate(incoming)
i = incoming.sum()
o = func(i)
ndata["target"] = o
input_data.append(GraphData.from_networkx(g, feature_key="features"))
output_data.append(GraphData.from_networkx(g, feature_key="target"))
return GraphDataLoader(input_data, output_data, batch_size=batch_size)
T = Tuple[Tuple[Tuple[Any, ...], Dict], torch.Tensor]
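# T reads as ((args, kwargs), target): the positional and keyword inputs to feed the network,
# paired with the supervision target extracted from the same batch (see the getters below).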
class DataGetter:
"""Methods to collect input, output from the loader."""
@classmethod
def get_node(cls, batch: GraphBatch) -> T:
args = (batch.x[:, :-1],)
kwargs = {}
out = batch.x[:, -1:]
return ((args, kwargs), out)
@classmethod
def get_edge(cls, batch: GraphBatch) -> T:
args = (batch.e[:, :-1],)
kwargs = {}
out = batch.e[:, -1:]
return ((args, kwargs), out)
@classmethod
def get_global(cls, batch: GraphBatch) -> T:
args = (batch.g[:, :-1],)
kwargs = {}
out = batch.g[:, -1:]
return ((args, kwargs), out)
@classmethod
def get_batch(cls, batch_tuple: Tuple[GraphBatch, GraphBatch]) -> T:
args = (batch_tuple[0],)
kwargs = {}
out = batch_tuple[1]
return ((args, kwargs), (out.e, out.x, out.g))
class NetworkTestCaseValidationError(Exception):
pass
@contextmanager
def does_not_raise():
yield
# TODO: model reset is not working
class NetworkTestCase:
"""A network test case."""
def __init__(
self,
network: torch.nn.Module,
modifier: Optional[Callable[[GraphBatch], Any]] = None,
getter: Optional[Callable[[GraphBatch], Any]] = None,
optimizer: Type[torch.optim.Optimizer] = None,
criterion=None,
loss_func: Callable = None,
epochs: int = 20,
batch_size: int = 100,
data_size: int = 1000,
loader: Optional[Callable[[int, int], GraphDataLoader]] = None,
expectation: Callable = None,
tags: Tuple[str, ...] = None,
device: str = None,
):
if expectation is None:
expectation = does_not_raise()
self.expectation = expectation
self.tags = tags
if modifier is None:
self.modifier = lambda x: x
else:
self.modifier = modifier
if getter is None:
self.getter = lambda x: x
else:
self.getter = getter
self.network = network
self.epochs = epochs
self.batch_size = batch_size
self.data_size = data_size
self.device = device
if loader is None:
loader = DataLoaders.random_loader
self.loader_func = loader
self.loader = self.loader_func(data_size, batch_size)
self.optimizer = optimizer
if criterion is None:
criterion = torch.nn.MSELoss()
if loss_func is not None:
loss_func = functools.partial(loss_func, criterion, self.device)
else:
loss_func = criterion
self.loss_func = loss_func
self.losses = None
def to(self, x, device=None):
device = device or self.device
if device is not None:
if isinstance(x, tuple):
return tuple([self.to(_x) for _x in x])
else:
return x.to(device)
return x
def seed(self, seed: int = SEED):
deterministic_seed(seed)
def reset(self, seed: int = SEED):
self.seed(seed)
Networks.reset(self.network)
self.to(self.network)
def provide_example(self):
batch = self.loader.first()
mod_batch = self.modifier(batch)
mod_batch = self.to(mod_batch)
data = self.getter(mod_batch)[0]
self.network(*data[0], **data[1])
# def validate_network_device(self):
# for p in self.network.parameters():
# assert p.device == self.device
def eval(self, data_size):
self.network.eval()
with torch.no_grad():
running_loss = 0.0
for batch in self.loader_func(data_size, data_size):
batch = self.to(batch)
batch = self.modifier(batch)
input, target = self.getter(batch)
output = self.network(*input[0], **input[1])
loss = self.loss_func(output, target)
running_loss += loss.item()
print("TARGET")
print(target)
print("OUTPUT")
print(output)
return running_loss
def train(self):
print("Training {}".format(self.network))
self.reset()
epochs = self.epochs
net = self.network
loader = self.loader
optimizer = self.optimizer
getter = self.getter
modifier = self.modifier
loss_func = self.loss_func
# provide example
self.provide_example()
if optimizer is None:
optimizer = optim.AdamW(net.parameters(), lr=1e-2)
self.pre_train_validate()
loss_arr = torch.zeros(epochs)
for epoch in range(epochs):
net.train()
running_loss = 0.0
for batch in loader:
batch = self.to(batch)
batch = modifier(batch)
input, target = getter(batch)
optimizer.zero_grad() # zero the gradient buffers
output = net(*input[0], **input[1])
for x, o, t in zip(["edge", "node", "global"], output, target):
if o.shape != t.shape:
raise NetworkTestCaseValidationError(
"{x} output shape ({o}) has a different shape from {x} target shape ({t})".format(
x=x, o=o.shape, t=t.shape
)
)
loss = loss_func(output, target)
self.to(loss)
loss.backward(retain_graph=True)
optimizer.step()
running_loss += loss.item()
loss_arr[epoch] = running_loss
self.losses = loss_arr
return loss_arr
def pre_train_validate(self):
for p in self.network.parameters():
assert p.requires_grad is True
def post_train_validate(self, threshold=0.1):
if self.losses[-1] > self.losses[0] * threshold:
raise NetworkTestCaseValidationError(
"Model did not train properly :(."
"\n\tlosses: {} -> {}".format(self.losses[0], self.losses[-1])
)
def __str__(self):
pass
@pytest.mark.parametrize(
"loader_func",
[
DataLoaders.random_loader,
DataLoaders.random_graph_red_black_nodes,
DataLoaders.random_graph_red_black_edges,
DataLoaders.est_density,
],
)
def test_loaders(loader_func):
loader = loader_func(100, 20)
for x in loader:
assert x
def mse_tuple(criterion, device, a, b):
loss = torch.tensor(0.0, dtype=torch.float32, device=device)
assert len(a) == len(b)
for i, (_a, _b) in enumerate(zip(a, b)):
assert _a.shape == _b.shape
l = criterion(_a, _b)
loss = loss + l
return loss
def get_id(case):
print(case.__class__)
tokens = OrderedDict(
{"id": None, "name": None, "loader": None, "expectation": None}
)
tokens["name"] = case["network"].name
tokens["id"] = case.get("id", None)
try:
tokens["loader"] = case.get("loader", None).__name__
except AttributeError:
pass
try:
tokens["expectation"] = case.get("expectation", None)
except AttributeError:
pass
return "-".join([str(v) for v in tokens.values() if v is not None])
@pytest.fixture
def network_case(request):
def pop(d, k, default):
if k in d:
res = d[k]
del d[k]
return res
return default
params = dict(request.param)
args = pop(params, "network_args", tuple())
kwargs = pop(params, "network_kwargs", {})
params["network"] = params["network"](*args, **kwargs)
case = NetworkTestCase(**params)
return case
cases = [
dict(
network=Networks.linear_block,
modifier=DataModifier.node_sum,
getter=DataGetter.get_node,
tags=["block", "basic"],
),
dict(
network=Networks.mlp_block,
modifier=DataModifier.node_sum,
getter=DataGetter.get_node,
tags=["block", "basic"],
),
dict(
network=Networks.node_block,
modifier=DataModifier.node_sum,
getter=DataGetter.get_node,
tags=["block", "basic", "node"],
),
dict(
network=Networks.edge_block,
modifier=DataModifier.edge_sum,
getter=DataGetter.get_edge,
tags=["block", "basic", "edge"],
),
dict(
network=Networks.global_block,
modifier=DataModifier.global_sum,
getter=DataGetter.get_global,
tags=["block", "basic", "global"],
),
dict(
network=Networks.graph_encoder,
loader=DataLoaders.random_graph_red_black_nodes,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_encoder", "node"],
), # randomly creates an input value, assigns 'red' or 'black' to nodes
dict(
network=Networks.graph_encoder,
loader=DataLoaders.random_graph_red_black_edges,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_encoder", "edge"],
), # randomly creates an input value, assigns 'red' or 'black' to edges
dict(
network=Networks.graph_encoder,
loader=DataLoaders.random_graph_red_black_global,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_encoder", "global"],
), # randomly creates an input value, assigns 'red' or 'black' to global
dict(
network=Networks.graph_encoder,
loader=DataLoaders.est_density,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
expectation=pytest.raises(NetworkTestCaseValidationError),
tags=["graph_encoder", "fail"],
), # network cannot learn the density without connections between nodes and edges,
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": True},
loader=DataLoaders.est_density,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "global"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": False, "pass_global_to_node": True},
loader=DataLoaders.est_density,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "global"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": False},
loader=DataLoaders.est_density,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "global"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": False, "pass_global_to_node": False},
loader=DataLoaders.est_density,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "global"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": True},
loader=DataLoaders.in_degree,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "node"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_encoder,
loader=DataLoaders.in_degree,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["graph_core", "node"],
expectation=pytest.raises(NetworkTestCaseValidationError),
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": True},
loader=DataLoaders.boolean_network,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["boolean_circuit"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_encoder,
loader=DataLoaders.boolean_network,
getter=DataGetter.get_batch,
loss_func=mse_tuple,
tags=["boolean_circuit"],
expectation=pytest.raises(NetworkTestCaseValidationError),
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core,
loader=DataLoaders.sigmoid_circuit,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": True},
getter=DataGetter.get_batch,
loss_func=mse_tuple,
epochs=100,
tags=["sigmoid_circuit"],
), # estimate the graph density using GraphCore
dict(
network=Networks.graph_core_multi_agg,
loader=DataLoaders.sigmoid_circuit,
network_kwargs={"pass_global_to_edge": True, "pass_global_to_node": True},
getter=DataGetter.get_batch,
loss_func=mse_tuple,
epochs=100,
tags=["sigmoid_circuit_(multiagg)"],
), # estimate the graph density using GraphCore
]
# in degree
# average in degree
# a function of number of nodes, in degree
# boolean network that depends on multiple passes
# sigmoid circuit
# shortest _path example
visited_cases = set()
def parameterize_by_group(groups: Tuple[str, ...] = None) -> Callable:
params = []
for idx, p in enumerate(cases):
if groups is None:
params.append(p)
else:
for tag in p.get("tags", []):
if tag in groups:
params.append(p)
visited_cases.add(idx)
break
if not params:
raise Exception("There are no cases with tags '{}'".format(groups))
return pytest.mark.parametrize("network_case", params, ids=get_id, indirect=True)
def run_test_case(network_case, device):
network_case.device = device
with network_case.expectation:
losses = network_case.train()
print(losses)
for p in network_case.network.parameters():
assert device == str(p.device)
network_case.post_train_validate()
network_case.eval(20)
return network_case
@pytest.mark.benchmark
class TestTraining:
@parameterize_by_group(["basic", "block"])
def test_train_block(self, network_case, device, benchmark):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
@parameterize_by_group(["graph_encoder"])
def test_train_encoder(self, network_case, device, benchmark):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
@parameterize_by_group(["graph_core"])
def test_train_core(self, network_case, device, benchmark):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
@parameterize_by_group(["boolean_circuit"])
def test_train_boolean_circuit(self, network_case, device, benchmark):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
@parameterize_by_group(["sigmoid_circuit"])
def test_train_sigmoid_circuit(self, network_case, device, benchmark):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
@parameterize_by_group(["sigmoid_circuit_(multiagg)"])
def test_train_sigmoid_circuit_with_multi_agg(
self, network_case, device, benchmark
):
benchmark.pedantic(
run_test_case,
args=(network_case, device),
iterations=1,
rounds=1,
warmup_rounds=0,
)
| [
"torch.zeros",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.nn.ReLU",
"torch.tensor"
] | 1.6.0 | jvrana/caldera | a346324e77f20739e00a82f97530dda4906f59dd |
1.8 | """
single channel speech enhancement for wind noise reduction.
refer to
"A Convolutional Recurrent Neural Network for Real-Time Speech Enhancement" .
Authors
* Wang Wei 2021
"""
import torch
import torch.nn as nn
class CNN_Block(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=[3, 3],
stride=(1,2),
padding=(1,0)) -> None:
super().__init__()
self.layers = torch.nn.ModuleList()
self.layers.append(nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding))
self.layers.append(nn.BatchNorm2d(out_channels))
self.layers.append(nn.ELU())
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class RNN_Block(torch.nn.Module):
def __init__(self,
input_size=1792,
hidden_size=1792,
num_layers=2,
rnn_type='LSTM',
dropout=0.2) -> None:
super().__init__()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
if self.rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(self.input_size,
self.hidden_size, self.num_layers,
batch_first=True, dropout=self.dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError("""An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(self.input_size, self.hidden_size, self.num_layers, nonlinearity=nonlinearity, dropout=self.dropout)
# self.hidden = self.init_hidden(batch_size)
def init_hidden(self, batch_size=1, device=None):
# the RNN here is unidirectional, so the leading dimension is just num_layers
if self.rnn_type == 'GRU':
return torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device)
elif self.rnn_type == 'LSTM':
return (
torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device),
torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device))
else:
raise Exception('Unknown rnn_type. Valid options: "LSTM", "GRU"')
def forward(self, x):
self.rnn.flatten_parameters()
x, _ = self.rnn(x)
return x
class DeCNN_Block(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=[3, 3],
stride=(1,2),
padding=(1,0),
output_padding=0) -> None:
super().__init__()
self.layers = torch.nn.ModuleList()
self.layers.append(
nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding))
self.layers.append(nn.BatchNorm2d(out_channels))
self.layers.append(nn.ELU())
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class Encoder(torch.nn.Module):
def __init__(self, in_channels=1, channels=16, layers=5, scale=2) -> None:
super().__init__()
self.cnn_b1 = CNN_Block(1, channels)
self.cnn_b2 = CNN_Block(channels, channels*2)
self.cnn_b3 = CNN_Block(channels*2, channels*4)
self.cnn_b4 = CNN_Block(channels*4, channels*8)
self.cnn_b5 = CNN_Block(channels*8, channels*16)
def forward(self, x):
o1 = self.cnn_b1(x)
o2 = self.cnn_b2(o1)
o3 = self.cnn_b3(o2)
o4 = self.cnn_b4(o3)
o5 = self.cnn_b5(o4)
return o1, o2, o3, o4, o5
class Decoder(torch.nn.Module):
def __init__(self, in_channels=512, layers=5, scale=2) -> None:
super().__init__()
self.decnn_b5 = DeCNN_Block(512, 128)
self.decnn_b4 = DeCNN_Block(256, 64)
self.decnn_b3 = DeCNN_Block(128, 32)
self.decnn_b2 = DeCNN_Block(64, 16, output_padding=(0,1))
self.decnn_b1 = DeCNN_Block(32, 1)
def forward(self, x, decoder_o5, decoder_o4, decoder_o3, decoder_o2, decoder_o1):
o5 = self.decnn_b5(torch.cat((x, decoder_o5), 1))
o4 = self.decnn_b4(torch.cat((o5, decoder_o4), 1))
o3 = self.decnn_b3(torch.cat((o4, decoder_o3), 1))
o2 = self.decnn_b2(torch.cat((o3, decoder_o2), 1))
o = self.decnn_b1(torch.cat((o2, decoder_o1), 1))
return o
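# Note on the skip connections (a direct reading of the decoder above): each
# DeCNN_Block receives the previous decoder output concatenated with the matching
# encoder feature map along the channel axis, so its in_channels are twice the
# encoder widths: 256+256=512, 128+128=256, 64+64=128, 32+32=64 and 16+16=32.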
class crn(torch.nn.Module):
"""Basic RNN model with projection layers between RNN layers.
Arguments
---------
input_size : int
Size of the expected input in the 3rd dimension.
rnn_size : int
Number of neurons to use in rnn (for each direction -> and <-).
projection : int
Number of neurons in projection layer.
layers : int
Number of RNN layers to use.
"""
def __init__(self, input_size=161, contex=0, bidir=False, rnn_size=128, projection=64, layers=2):
super().__init__()
self.layers = torch.nn.ModuleList()
if input_size == 257:
rnn_size = 1792
elif input_size == 161:
rnn_size = 1024
self.encoder = Encoder()
self.rnn = RNN_Block(input_size=rnn_size, hidden_size=rnn_size)
self.decoder = Decoder()
def forward(self, x: torch.Tensor):
"""model forward
Args:
x (tensor): input tenosr, [N,T,F]
Returns:
[type]: [description]
"""
# N, T, F = x.size()
if len(x.shape)==3:
x = x.unsqueeze(1) # [N,T,F] to [N, 1, T, F]
N, C, T, F = x.size()
o1, o2, o3, o4, o5 = self.encoder(x)
embeded_ch = o5.size(1)
rnn_in = o5.transpose(1, 2)
rnn_in = rnn_in.reshape(N, T, -1)
rnn_out = self.rnn(rnn_in)
rnn_out = rnn_out.unsqueeze(1)
decoder_in = rnn_out.reshape(N, embeded_ch, T, -1)
decoder_out = self.decoder(decoder_in, o5, o4, o3, o2, o1)
return decoder_out.squeeze(1)
if __name__ == "__main__":
N, C, T, F = 10, 1, 100, 257
data = torch.rand((N, T,F))
print(data.shape)
model = crn(input_size=F)
output = model(data)
print(output.shape)
# input_size = 257
# contex = 3
# model = CustomModel(input_size, contex=contex)
# # input_data = torch.rand(100, 20, input_size)
from torchsummary import summary
summary(model, (1, 100, 257))
| [
"torch.zeros",
"torch.rand",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.RNN",
"torch.nn.ELU"
] | 1.8.0 | wangwei2009/speechbrain | ebbac4561a9c9101786e0ab0b1105017eb655fc8 |
1.8 | import torch
import torch.nn as nn
import os
import sys
# from show import show_params, show_model
import torch.nn.functional as F
from .conv_stft import ConvSTFT, ConviSTFT
from .complexnn import ComplexConv2d, ComplexConvTranspose2d, NavieComplexLSTM, complex_cat, ComplexBatchNorm
class DCCRN(nn.Module):
def __init__(
self,
rnn_layers=2,
rnn_units=128,
win_len=400,
win_inc=100,
fft_len=512,
win_type='hanning',
masking_mode='E',
use_clstm=False,
use_cbn = False,
kernel_size=5,
kernel_num=[16,32,64,128,256,256]
):
'''
        rnn_layers: the number of LSTM layers in the CRN.
        rnn_units: for the complex LSTM (use_clstm=True), rnn_units = real + imag units.
'''
super(DCCRN, self).__init__()
# for fft
self.win_len = win_len
self.win_inc = win_inc
self.fft_len = fft_len
self.win_type = win_type
input_dim = win_len
output_dim = win_len
self.rnn_units = rnn_units
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_layers = rnn_layers
self.kernel_size = kernel_size
#self.kernel_num = [2, 8, 16, 32, 128, 128, 128]
#self.kernel_num = [2, 16, 32, 64, 128, 256, 256]
self.kernel_num = [2]+kernel_num
self.masking_mode = masking_mode
self.use_clstm = use_clstm
#bidirectional=True
bidirectional=False
fac = 2 if bidirectional else 1
fix=True
self.fix = fix
self.stft = ConvSTFT(self.win_len, self.win_inc, fft_len, self.win_type, 'complex', fix=fix)
self.istft = ConviSTFT(self.win_len, self.win_inc, fft_len, self.win_type, 'complex', fix=fix)
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
for idx in range(len(self.kernel_num)-1):
self.encoder.append(
nn.Sequential(
#nn.ConstantPad2d([0, 0, 0, 0], 0),
ComplexConv2d(
self.kernel_num[idx],
self.kernel_num[idx+1],
kernel_size=(self.kernel_size, 2),
stride=(2, 1),
padding=(2, 1)
),
nn.BatchNorm2d(self.kernel_num[idx+1]) if not use_cbn else ComplexBatchNorm(self.kernel_num[idx+1]),
nn.PReLU()
)
)
hidden_dim = self.fft_len//(2**(len(self.kernel_num)))
if self.use_clstm:
rnns = []
for idx in range(rnn_layers):
rnns.append(
NavieComplexLSTM(
input_size= hidden_dim*self.kernel_num[-1] if idx == 0 else self.rnn_units,
hidden_size=self.rnn_units,
bidirectional=bidirectional,
batch_first=False,
projection_dim= hidden_dim*self.kernel_num[-1] if idx == rnn_layers-1 else None,
)
)
self.enhance = nn.Sequential(*rnns)
else:
self.enhance = nn.LSTM(
input_size= hidden_dim*self.kernel_num[-1],
hidden_size=self.rnn_units,
num_layers=2,
dropout=0.0,
bidirectional=bidirectional,
batch_first=False
)
self.tranform = nn.Linear(self.rnn_units * fac, hidden_dim*self.kernel_num[-1])
for idx in range(len(self.kernel_num)-1, 0, -1):
if idx != 1:
self.decoder.append(
nn.Sequential(
ComplexConvTranspose2d(
self.kernel_num[idx]*2,
self.kernel_num[idx-1],
kernel_size =(self.kernel_size, 2),
stride=(2, 1),
padding=(2,0),
output_padding=(1,0)
),
nn.BatchNorm2d(self.kernel_num[idx-1]) if not use_cbn else ComplexBatchNorm(self.kernel_num[idx-1]),
#nn.ELU()
nn.PReLU()
)
)
else:
self.decoder.append(
nn.Sequential(
ComplexConvTranspose2d(
self.kernel_num[idx]*2,
self.kernel_num[idx-1],
kernel_size =(self.kernel_size, 2),
stride=(2, 1),
padding=(2,0),
output_padding=(1,0)
),
)
)
# show_model(self)
# show_params(self)
self.flatten_parameters()
def flatten_parameters(self):
if isinstance(self.enhance, nn.LSTM):
self.enhance.flatten_parameters()
def forward(self, inputs, lens=None):
specs = self.stft(inputs)
real = specs[:,:self.fft_len//2+1]
imag = specs[:,self.fft_len//2+1:]
spec_mags = torch.sqrt(real**2+imag**2+1e-8)
spec_mags = spec_mags
spec_phase = torch.atan2(imag, real)
spec_phase = spec_phase
cspecs = torch.stack([real,imag],1)
cspecs = cspecs[:,:,1:]
'''
means = torch.mean(cspecs, [1,2,3], keepdim=True)
std = torch.std(cspecs, [1,2,3], keepdim=True )
normed_cspecs = (cspecs-means)/(std+1e-8)
out = normed_cspecs
'''
out = cspecs
encoder_out = []
for idx, layer in enumerate(self.encoder):
out = layer(out)
# print('encoder', out.size())
encoder_out.append(out)
batch_size, channels, dims, lengths = out.size()
out = out.permute(3, 0, 1, 2)
if self.use_clstm:
r_rnn_in = out[:,:,:channels//2]
i_rnn_in = out[:,:,channels//2:]
r_rnn_in = torch.reshape(r_rnn_in, [lengths, batch_size, channels//2*dims])
i_rnn_in = torch.reshape(i_rnn_in, [lengths, batch_size, channels//2*dims])
r_rnn_in, i_rnn_in = self.enhance([r_rnn_in, i_rnn_in])
r_rnn_in = torch.reshape(r_rnn_in, [lengths, batch_size, channels//2, dims])
i_rnn_in = torch.reshape(i_rnn_in, [lengths, batch_size, channels//2, dims])
out = torch.cat([r_rnn_in, i_rnn_in],2)
else:
# to [L, B, C, D]
out = torch.reshape(out, [lengths, batch_size, channels*dims])
out, _ = self.enhance(out)
out = self.tranform(out)
out = torch.reshape(out, [lengths, batch_size, channels, dims])
out = out.permute(1, 2, 3, 0)
for idx in range(len(self.decoder)):
out = complex_cat([out,encoder_out[-1 - idx]],1)
out = self.decoder[idx](out)
out = out[...,1:]
# print('decoder', out.size())
mask_real = out[:,0]
mask_imag = out[:,1]
mask_real = F.pad(mask_real, [0,0,1,0])
mask_imag = F.pad(mask_imag, [0,0,1,0])
if self.masking_mode == 'E' :
mask_mags = (mask_real**2+mask_imag**2)**0.5
real_phase = mask_real/(mask_mags+1e-8)
imag_phase = mask_imag/(mask_mags+1e-8)
mask_phase = torch.atan2(
imag_phase,
real_phase
)
#mask_mags = torch.clamp_(mask_mags,0,100)
mask_mags = torch.tanh(mask_mags)
est_mags = mask_mags*spec_mags
est_phase = spec_phase + mask_phase
real = est_mags*torch.cos(est_phase)
imag = est_mags*torch.sin(est_phase)
elif self.masking_mode == 'C':
real,imag = real*mask_real-imag*mask_imag, real*mask_imag+imag*mask_real
elif self.masking_mode == 'R':
real, imag = real*mask_real, imag*mask_imag
out_spec = torch.cat([real, imag], 1)
out_wav = self.istft(out_spec)
out_wav = torch.squeeze(out_wav, 1)
#out_wav = torch.tanh(out_wav)
out_wav = torch.clamp_(out_wav,-1,1)
return out_spec, out_wav
def get_params(self, weight_decay=0.0):
# add L2 penalty
weights, biases = [], []
for name, param in self.named_parameters():
if 'bias' in name:
biases += [param]
else:
weights += [param]
params = [{
'params': weights,
'weight_decay': weight_decay,
}, {
'params': biases,
'weight_decay': 0.0,
}]
return params
def loss(self, inputs, labels, loss_mode='SI-SNR'):
if loss_mode == 'MSE':
b, d, t = inputs.shape
labels[:,0,:]=0
labels[:,d//2,:]=0
return F.mse_loss(inputs, labels, reduction='mean')*d
elif loss_mode == 'SI-SNR':
#return -torch.mean(si_snr(inputs, labels))
return -(si_snr(inputs, labels))
elif loss_mode == 'MAE':
gth_spec, gth_phase = self.stft(labels)
b,d,t = inputs.shape
return torch.mean(torch.abs(inputs-gth_spec))*d
def remove_dc(data):
mean = torch.mean(data, -1, keepdim=True)
data = data - mean
return data
def l2_norm(s1, s2):
#norm = torch.sqrt(torch.sum(s1*s2, 1, keepdim=True))
#norm = torch.norm(s1*s2, 1, keepdim=True)
norm = torch.sum(s1*s2, -1, keepdim=True)
return norm
def si_snr(s1, s2, eps=1e-8):
#s1 = remove_dc(s1)
#s2 = remove_dc(s2)
s1_s2_norm = l2_norm(s1, s2)
s2_s2_norm = l2_norm(s2, s2)
s_target = s1_s2_norm/(s2_s2_norm+eps)*s2
    e_noise = s1 - s_target
    target_norm = l2_norm(s_target, s_target)
    noise_norm = l2_norm(e_noise, e_noise)
snr = 10*torch.log10((target_norm)/(noise_norm+eps)+eps)
return torch.mean(snr)
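# A minimal, illustrative check (assumed example): SI-SNR as defined above is
# invariant to rescaling of the estimate s1, since both the projected target and
# the residual scale by the same factor.
def _si_snr_scale_invariance_example():
    ref = torch.randn(1, 16000)
    est = ref + 0.1 * torch.randn(1, 16000)
    a = si_snr(est, ref)
    b = si_snr(2.0 * est, ref)  # equal to `a` up to numerical error
    return a, b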
def test_complex():
torch.manual_seed(20)
inputs = torch.randn(10,2,256,10)
conv = ComplexConv2d(2,32,(3,1),(2,1),(1,0))
tconv = ComplexConvTranspose2d(32,2,(3,1),(2,1),(1,0),(1,0))
out = conv(inputs)
print(out.shape)
out = tconv(out)
print(out.shape)
if __name__ == '__main__':
torch.manual_seed(10)
# torch.autograd.set_detect_anomaly(True)
inputs = torch.randn([10,16000*4]).clamp_(-1,1)
labels = torch.randn([10,16000*4]).clamp_(-1,1)
'''
# DCCRN-E
net = DCCRN(rnn_units=256,masking_mode='E')
outputs = net(inputs)[1]
loss = net.loss(outputs, labels, loss_mode='SI-SNR')
print(loss)
# DCCRN-R
net = DCCRN(rnn_units=256,masking_mode='R')
outputs = net(inputs)[1]
loss = net.loss(outputs, labels, loss_mode='SI-SNR')
print(loss)
# DCCRN-C
net = DCCRN(rnn_units=256,masking_mode='C')
outputs = net(inputs)[1]
loss = net.loss(outputs, labels, loss_mode='SI-SNR')
print(loss)
'''
# DCCRN-CL
net = DCCRN(rnn_units=256,masking_mode='E',use_clstm=True,kernel_num=[32, 64, 128, 256, 256,256])
outputs = net(inputs)[1]
print(outputs.shape)
loss = net.loss(outputs, labels, loss_mode='SI-SNR')
print(loss)
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.LSTM",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.log10",
"torch.squeeze",
"torch.nn.functional.pad",
"torch.reshape",
"torch.sum",
"torch.sqrt",
"torch.manual_seed",
"torch.abs",
"torch.cos",
"torch.nn.Sequential",
"torch.clamp_",
"torch.randn",
"torch.sin",
"torch.nn.functional.mse_loss",
"torch.nn.PReLU",
"torch.tanh",
"torch.atan2",
"torch.mean"
] | 1.8.0 | wangwei2009/speechbrain | ebbac4561a9c9101786e0ab0b1105017eb655fc8 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import Tensor, rand, randint, tensor
from tests.classification.inputs import Input
from tests.classification.inputs import _input_binary as _bin
from tests.classification.inputs import _input_binary_prob as _bin_prob
from tests.classification.inputs import _input_multiclass as _mc
from tests.classification.inputs import _input_multiclass_prob as _mc_prob
from tests.classification.inputs import _input_multidim_multiclass as _mdmc
from tests.classification.inputs import _input_multidim_multiclass_prob as _mdmc_prob
from tests.classification.inputs import _input_multilabel as _ml
from tests.classification.inputs import _input_multilabel_multidim as _mlmd
from tests.classification.inputs import _input_multilabel_multidim_prob as _mlmd_prob
from tests.classification.inputs import _input_multilabel_prob as _ml_prob
from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES, THRESHOLD
from torchmetrics.utilities.checks import _input_format_classification
from torchmetrics.utilities.data import select_topk, to_onehot
from torchmetrics.utilities.enums import DataType
torch.manual_seed(42)
# Some additional inputs to test on
_ml_prob_half = Input(_ml_prob.preds.half(), _ml_prob.target)
_mc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2)
_mc_prob_2cls_preds /= _mc_prob_2cls_preds.sum(dim=2, keepdim=True)
_mc_prob_2cls = Input(_mc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)))
_mdmc_prob_many_dims_preds = rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES, EXTRA_DIM, EXTRA_DIM)
_mdmc_prob_many_dims_preds /= _mdmc_prob_many_dims_preds.sum(dim=2, keepdim=True)
_mdmc_prob_many_dims = Input(
_mdmc_prob_many_dims_preds,
randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM, EXTRA_DIM)),
)
_mdmc_prob_2cls_preds = rand(NUM_BATCHES, BATCH_SIZE, 2, EXTRA_DIM)
_mdmc_prob_2cls_preds /= _mdmc_prob_2cls_preds.sum(dim=2, keepdim=True)
_mdmc_prob_2cls = Input(_mdmc_prob_2cls_preds, randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)))
# Some utils
T = Tensor
def _idn(x):
return x
def _usq(x):
return x.unsqueeze(-1)
def _thrs(x):
return x >= THRESHOLD
def _rshp1(x):
return x.reshape(x.shape[0], -1)
def _rshp2(x):
return x.reshape(x.shape[0], x.shape[1], -1)
def _onehot(x):
return to_onehot(x, NUM_CLASSES)
def _onehot2(x):
return to_onehot(x, 2)
def _top1(x):
return select_topk(x, 1)
def _top2(x):
return select_topk(x, 2)
# To avoid ugly black line wrapping
def _ml_preds_tr(x):
return _rshp1(_thrs(x))
def _onehot_rshp1(x):
return _onehot(_rshp1(x))
def _onehot2_rshp1(x):
return _onehot2(_rshp1(x))
def _top1_rshp2(x):
return _top1(_rshp2(x))
def _top2_rshp2(x):
return _top2(_rshp2(x))
def _probs_to_mc_preds_tr(x):
return _onehot2(_thrs(x))
def _mlmd_prob_to_mc_preds_tr(x):
return _onehot2(_rshp1(_thrs(x)))
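# A small, illustrative sketch of the torchmetrics helpers used by the transforms
# above (assumed behaviour of `to_onehot` and `select_topk`):
#   to_onehot(tensor([0, 2]), 3)              -> [[1, 0, 0], [0, 0, 1]]
#   select_topk(tensor([[0.1, 0.7, 0.2]]), 1) -> [[0, 1, 0]]
# `_thrs` binarizes probabilities at THRESHOLD, and the `_rshp*` helpers flatten any
# extra dimensions so that inputs end up in (N, C) or (N, C, X) form.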
########################
# Test correct inputs
########################
@pytest.mark.parametrize(
"inputs, num_classes, is_multiclass, top_k, exp_mode, post_preds, post_target",
[
#############################
# Test usual expected cases
(_bin, None, False, None, "multi-class", _usq, _usq),
(_bin, 1, False, None, "multi-class", _usq, _usq),
(_bin_prob, None, None, None, "binary", lambda x: _usq(_thrs(x)), _usq),
(_ml_prob, None, None, None, "multi-label", _thrs, _idn),
(_ml, None, False, None, "multi-dim multi-class", _idn, _idn),
(_ml_prob, None, None, None, "multi-label", _ml_preds_tr, _rshp1),
(_ml_prob, None, None, 2, "multi-label", _top2, _rshp1),
(_mlmd, None, False, None, "multi-dim multi-class", _rshp1, _rshp1),
(_mc, NUM_CLASSES, None, None, "multi-class", _onehot, _onehot),
(_mc_prob, None, None, None, "multi-class", _top1, _onehot),
(_mc_prob, None, None, 2, "multi-class", _top2, _onehot),
(_mdmc, NUM_CLASSES, None, None, "multi-dim multi-class", _onehot, _onehot),
(_mdmc_prob, None, None, None, "multi-dim multi-class", _top1_rshp2, _onehot),
(_mdmc_prob, None, None, 2, "multi-dim multi-class", _top2_rshp2, _onehot),
(_mdmc_prob_many_dims, None, None, None, "multi-dim multi-class", _top1_rshp2, _onehot_rshp1),
(_mdmc_prob_many_dims, None, None, 2, "multi-dim multi-class", _top2_rshp2, _onehot_rshp1),
###########################
# Test some special cases
# Make sure that half precision works, i.e. is converted to full precision
(_ml_prob_half, None, None, None, "multi-label", lambda x: _ml_preds_tr(x.float()), _rshp1),
# Binary as multiclass
(_bin, None, None, None, "multi-class", _onehot2, _onehot2),
# Binary probs as multiclass
(_bin_prob, None, True, None, "binary", _probs_to_mc_preds_tr, _onehot2),
# Multilabel as multiclass
(_ml, None, True, None, "multi-dim multi-class", _onehot2, _onehot2),
# Multilabel probs as multiclass
(_ml_prob, None, True, None, "multi-label", _probs_to_mc_preds_tr, _onehot2),
# Multidim multilabel as multiclass
(_mlmd, None, True, None, "multi-dim multi-class", _onehot2_rshp1, _onehot2_rshp1),
# Multidim multilabel probs as multiclass
(_mlmd_prob, None, True, None, "multi-label", _mlmd_prob_to_mc_preds_tr, _onehot2_rshp1),
# Multiclass prob with 2 classes as binary
(_mc_prob_2cls, None, False, None, "multi-class", lambda x: _top1(x)[:, [1]], _usq),
# Multi-dim multi-class with 2 classes as multi-label
(_mdmc_prob_2cls, None, False, None, "multi-dim multi-class", lambda x: _top1(x)[:, 1], _idn),
],
)
def test_usual_cases(inputs, num_classes, is_multiclass, top_k, exp_mode, post_preds, post_target):
def __get_data_type_enum(str_exp_mode):
return next(DataType[n] for n in dir(DataType) if DataType[n] == str_exp_mode)
for exp_mode in (exp_mode, __get_data_type_enum(exp_mode)):
preds_out, target_out, mode = _input_format_classification(
preds=inputs.preds[0],
target=inputs.target[0],
threshold=THRESHOLD,
num_classes=num_classes,
is_multiclass=is_multiclass,
top_k=top_k,
)
assert mode == exp_mode
assert torch.equal(preds_out, post_preds(inputs.preds[0]).int())
assert torch.equal(target_out, post_target(inputs.target[0]).int())
# Test that things work when batch_size = 1
preds_out, target_out, mode = _input_format_classification(
preds=inputs.preds[0][[0], ...],
target=inputs.target[0][[0], ...],
threshold=THRESHOLD,
num_classes=num_classes,
is_multiclass=is_multiclass,
top_k=top_k,
)
assert mode == exp_mode
assert torch.equal(preds_out, post_preds(inputs.preds[0][[0], ...]).int())
assert torch.equal(target_out, post_target(inputs.target[0][[0], ...]).int())
# Test that threshold is correctly applied
def test_threshold():
target = T([1, 1, 1]).int()
preds_probs = T([0.5 - 1e-5, 0.5, 0.5 + 1e-5])
preds_probs_out, _, _ = _input_format_classification(preds_probs, target, threshold=0.5)
assert torch.equal(tensor([0, 1, 1], dtype=torch.int), preds_probs_out.squeeze().int())
########################################################################
# Test incorrect inputs
########################################################################
@pytest.mark.parametrize("threshold", [-0.5, 0.0, 1.0, 1.5])
def test_incorrect_threshold(threshold):
preds, target = rand(size=(7, )), randint(high=2, size=(7, ))
with pytest.raises(ValueError):
_input_format_classification(preds, target, threshold=threshold)
@pytest.mark.parametrize(
"preds, target, num_classes, is_multiclass",
[
# Target not integer
(randint(high=2, size=(7, )), randint(high=2, size=(7, )).float(), None, None),
# Target negative
(randint(high=2, size=(7, )), -randint(high=2, size=(7, )), None, None),
# Preds negative integers
(-randint(high=2, size=(7, )), randint(high=2, size=(7, )), None, None),
# Negative probabilities
(-rand(size=(7, )), randint(high=2, size=(7, )), None, None),
# is_multiclass=False and target > 1
(rand(size=(7, )), randint(low=2, high=4, size=(7, )), None, False),
# is_multiclass=False and preds integers with > 1
(randint(low=2, high=4, size=(7, )), randint(high=2, size=(7, )), None, False),
# Wrong batch size
(randint(high=2, size=(8, )), randint(high=2, size=(7, )), None, None),
# Completely wrong shape
(randint(high=2, size=(7, )), randint(high=2, size=(7, 4)), None, None),
# Same #dims, different shape
(randint(high=2, size=(7, 3)), randint(high=2, size=(7, 4)), None, None),
# Same shape and preds floats, target not binary
(rand(size=(7, 3)), randint(low=2, high=4, size=(7, 3)), None, None),
# #dims in preds = 1 + #dims in target, C shape not second or last
(rand(size=(7, 3, 4, 3)), randint(high=4, size=(7, 3, 3)), None, None),
# #dims in preds = 1 + #dims in target, preds not float
(randint(high=2, size=(7, 3, 3, 4)), randint(high=4, size=(7, 3, 3)), None, None),
# is_multiclass=False, with C dimension > 2
(_mc_prob.preds[0], randint(high=2, size=(BATCH_SIZE, )), None, False),
# Probs of multiclass preds do not sum up to 1
(rand(size=(7, 3, 5)), randint(high=2, size=(7, 5)), None, None),
# Max target larger or equal to C dimension
(_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE, )), None, None),
# C dimension not equal to num_classes
(_mc_prob.preds[0], _mc_prob.target[0], NUM_CLASSES + 1, None),
# Max target larger than num_classes (with #dim preds = 1 + #dims target)
(_mc_prob.preds[0], randint(low=NUM_CLASSES + 1, high=100, size=(BATCH_SIZE, NUM_CLASSES)), 4, None),
# Max target larger than num_classes (with #dim preds = #dims target)
(randint(high=4, size=(7, 3)), randint(low=5, high=7, size=(7, 3)), 4, None),
# Max preds larger than num_classes (with #dim preds = #dims target)
(randint(low=5, high=7, size=(7, 3)), randint(high=4, size=(7, 3)), 4, None),
# Num_classes=1, but is_multiclass not false
(randint(high=2, size=(7, )), randint(high=2, size=(7, )), 1, None),
# is_multiclass=False, but implied class dimension (for multi-label, from shape) != num_classes
(randint(high=2, size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False),
# Multilabel input with implied class dimension != num_classes
(rand(size=(7, 3, 3)), randint(high=2, size=(7, 3, 3)), 4, False),
# Multilabel input with is_multiclass=True, but num_classes != 2 (or None)
(rand(size=(7, 3)), randint(high=2, size=(7, 3)), 4, True),
# Binary input, num_classes > 2
(rand(size=(7, )), randint(high=2, size=(7, )), 4, None),
# Binary input, num_classes == 2 and is_multiclass not True
(rand(size=(7, )), randint(high=2, size=(7, )), 2, None),
(rand(size=(7, )), randint(high=2, size=(7, )), 2, False),
# Binary input, num_classes == 1 and is_multiclass=True
(rand(size=(7, )), randint(high=2, size=(7, )), 1, True),
],
)
def test_incorrect_inputs(preds, target, num_classes, is_multiclass):
with pytest.raises(ValueError):
_input_format_classification(
preds=preds, target=target, threshold=THRESHOLD, num_classes=num_classes, is_multiclass=is_multiclass
)
@pytest.mark.parametrize(
"preds, target, num_classes, is_multiclass, top_k",
[
# Topk set with non (md)mc or ml prob data
(_bin.preds[0], _bin.target[0], None, None, 2),
(_bin_prob.preds[0], _bin_prob.target[0], None, None, 2),
(_mc.preds[0], _mc.target[0], None, None, 2),
(_ml.preds[0], _ml.target[0], None, None, 2),
(_mlmd.preds[0], _mlmd.target[0], None, None, 2),
(_mdmc.preds[0], _mdmc.target[0], None, None, 2),
# top_k = 0
(_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0),
# top_k = float
(_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, None, 0.123),
# top_k =2 with 2 classes, is_multiclass=False
(_mc_prob_2cls.preds[0], _mc_prob_2cls.target[0], None, False, 2),
# top_k = number of classes (C dimension)
(_mc_prob.preds[0], _mc_prob.target[0], None, None, NUM_CLASSES),
# is_multiclass = True for ml prob inputs, top_k set
(_ml_prob.preds[0], _ml_prob.target[0], None, True, 2),
# top_k = num_classes for ml prob inputs
(_ml_prob.preds[0], _ml_prob.target[0], None, True, NUM_CLASSES),
],
)
def test_incorrect_inputs_topk(preds, target, num_classes, is_multiclass, top_k):
with pytest.raises(ValueError):
_input_format_classification(
preds=preds,
target=target,
threshold=THRESHOLD,
num_classes=num_classes,
is_multiclass=is_multiclass,
top_k=top_k,
)
| [
"torch.manual_seed",
"torch.rand",
"torch.randint",
"torch.tensor"
] | 1.3.1 | alanhdu/metrics | b168272eaf1ff08b9447e75338753f9c2abf0859 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import torch
from torch import Tensor
from torchmetrics.functional.classification.auroc import _auroc_compute, _auroc_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.imports import _TORCH_LOWER_1_6
class AUROC(Metric):
r"""Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_.
Works for both binary, multilabel and multiclass problems. In the case of
multiclass, the values will be calculated based on a one-vs-the-rest approach.
Forward accepts
- ``preds`` (float tensor): ``(N, ...)`` (binary) or ``(N, C, ...)`` (multiclass) tensor
with probabilities, where C is the number of classes.
- ``target`` (long tensor): ``(N, ...)`` or ``(N, C, ...)`` with integer labels
    For non-binary input, if the ``preds`` and ``target`` tensors have the same
    size the input will be interpreted as multilabel, and if ``preds`` has one
    dimension more than the ``target`` tensor the input will be interpreted as
    multiclass.
Args:
        num_classes: integer with number of classes. Not necessary to provide
            for binary problems.
        pos_label: integer determining the positive class. Default is ``None``,
            which for binary problems is translated to 1. For multiclass problems
            this argument should not be set, as we iteratively change it in the
            range [0, num_classes-1]
average:
- ``'micro'`` computes metric globally. Only works for multilabel problems
- ``'macro'`` computes metric for each class and uniformly averages them
- ``'weighted'`` computes metric for each class and does a weighted-average,
where each class is weighted by their support (accounts for class imbalance)
- ``None`` computes and returns the metric per class
max_fpr:
If not ``None``, calculates standardized partial AUC over the
range [0, max_fpr]. Should be a float between 0 and 1.
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When ``None``, DDP
will be used to perform the allgather
Raises:
ValueError:
If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``.
ValueError:
If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
RuntimeError:
If ``PyTorch version`` is ``below 1.6`` since max_fpr requires ``torch.bucketize``
which is not available below 1.6.
ValueError:
If the mode of data (binary, multi-label, multi-class) changes between batches.
Example:
>>> # binary case
>>> from torchmetrics import AUROC
>>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
>>> target = torch.tensor([0, 0, 1, 1, 1])
>>> auroc = AUROC(pos_label=1)
>>> auroc(preds, target)
tensor(0.5000)
>>> # multiclass case
>>> preds = torch.tensor([[0.90, 0.05, 0.05],
... [0.05, 0.90, 0.05],
... [0.05, 0.05, 0.90],
... [0.85, 0.05, 0.10],
... [0.10, 0.10, 0.80]])
>>> target = torch.tensor([0, 1, 1, 2, 2])
>>> auroc = AUROC(num_classes=3)
>>> auroc(preds, target)
tensor(0.7778)
"""
def __init__(
self,
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
average: Optional[str] = 'macro',
max_fpr: Optional[float] = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
self.num_classes = num_classes
self.pos_label = pos_label
self.average = average
self.max_fpr = max_fpr
allowed_average = (None, 'macro', 'weighted', 'micro')
if self.average not in allowed_average:
raise ValueError(
f'Argument `average` expected to be one of the following: {allowed_average} but got {average}'
)
if self.max_fpr is not None:
if not isinstance(max_fpr, float) or not 0 < max_fpr <= 1:
raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
if _TORCH_LOWER_1_6:
raise RuntimeError(
'`max_fpr` argument requires `torch.bucketize` which is not available below PyTorch version 1.6'
)
self.mode = None
self.add_state("preds", default=[], dist_reduce_fx=None)
self.add_state("target", default=[], dist_reduce_fx=None)
rank_zero_warn(
'Metric `AUROC` will save all targets and predictions in buffer.'
' For large datasets this may lead to large memory footprint.'
)
def update(self, preds: Tensor, target: Tensor):
"""
Update state with predictions and targets.
Args:
preds: Predictions from model (probabilities, or labels)
target: Ground truth labels
"""
preds, target, mode = _auroc_update(preds, target)
self.preds.append(preds)
self.target.append(target)
if self.mode is not None and self.mode != mode:
raise ValueError(
'The mode of data (binary, multi-label, multi-class) should be constant, but changed'
f' between batches from {self.mode} to {mode}'
)
self.mode = mode
def compute(self) -> Tensor:
"""
Computes AUROC based on inputs passed in to ``update`` previously.
"""
preds = torch.cat(self.preds, dim=0)
target = torch.cat(self.target, dim=0)
return _auroc_compute(
preds,
target,
self.mode,
self.num_classes,
self.pos_label,
self.average,
self.max_fpr,
)
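# A minimal, illustrative multilabel sketch (assumed usage, not from the original
# file; as noted in the docstring, ``average='micro'`` only applies to multilabel):
#
#   preds = torch.rand(8, 3)              # per-label probabilities
#   target = torch.randint(0, 2, (8, 3))  # binary targets of the same shape
#   auroc = AUROC(num_classes=3, average='micro')
#   score = auroc(preds, target)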
| [
"torch.cat"
] | 1.3.1 | alanhdu/metrics | b168272eaf1ff08b9447e75338753f9c2abf0859 |
1.0 | import dataclasses
import json
import os
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts
**which relate to the training loop itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not.
do_eval (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run evaluation on the dev set or not.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not.
evaluation_strategy(:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
The evaluation strategy to adopt during training. Possible values are:
* :obj:`"no"`: No evaluation is done during training.
* :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
* :obj:`"epoch"`: Evaluation is done at the end of each epoch.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):
Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
.. warning::
When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
examples.
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform (if not an integer, will perform the decimal part percents of
the last epoch before stopping training).
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of updates steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to not use CUDA even when it is available or not.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
            When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`):
Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
same value as :obj:`logging_steps` if not set.
dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.
past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
run_name (:obj:`str`, `optional`):
A descriptor for the run. Notably used for wandb logging.
disable_tqdm (:obj:`bool`, `optional`):
Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set
to warn or lower (default), :obj:`False` otherwise.
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model
forward method.
(Note: this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
label_names (:obj:`List[str]`, `optional`):
The list of keys in your dictionary of inputs that correspond to the labels.
Will eventually default to :obj:`["labels"]` except if the model used is one of the
:obj:`XxxForQuestionAnswering` in which case it will default to
:obj:`["start_positions", "end_positions"]`.
load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to load the best model found during training at the end of training.
.. note::
When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved
after each evaluation.
        metric_for_best_model (:obj:`str`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
loss).
            If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
:obj:`False` if your metric is better when lower.
        greater_is_better (:obj:`bool`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
models should have a greater metric or not. Will default to:
- :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
:obj:`"eval_loss"`.
- :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
evaluate_during_training: bool = field(
default=False,
metadata={"help": "Run evaluation during training at each logging step."},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log and eval the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
        if self.evaluate_during_training:
            self.evaluation_strategy = EvaluationStrategy.STEPS
            warnings.warn(
                "The `evaluate_during_training` argument is deprecated in favor of `evaluation_strategy` (which has more options)",
                FutureWarning,
            )
        else:
            self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
def to_dict(self):
"""
Serializes this instance while replace `Enum` by their values (for JSON serialization support).
"""
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
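# A minimal, illustrative usage sketch (assumed example, not part of the library):
#
#   args = TrainingArguments(
#       output_dir="./outputs",
#       do_train=True,
#       per_device_train_batch_size=16,
#       num_train_epochs=1.0,
#       evaluation_strategy="steps",
#       eval_steps=200,
#   )
#   args.train_batch_size   # 16 * max(1, n_gpu)
#   args.to_json_string()   # JSON dump with Enum values serialized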
| [
"torch.device",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.is_available"
] | 1.0 | xiye17/transformers | 924989e70d9425e3276ca76f148a0fcd4bbd58cf |
1.10 | import argparse
import random
from copy import deepcopy
import torch
import torch.backends
from torch import optim
from torch.hub import load_state_dict_from_url
from torch.nn import CrossEntropyLoss
from torchvision import datasets
from torchvision.models import vgg16
from torchvision.transforms import transforms
from tqdm import tqdm
from baal.active import get_heuristic, ActiveLearningDataset
from baal.active.active_loop import ActiveLearningLoop
from baal.bayesian.dropout import patch_module
from baal import ModelWrapper
"""
Minimal example to use BaaL.
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", default=100, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--initial_pool", default=1000, type=int)
parser.add_argument("--query_size", default=100, type=int)
parser.add_argument("--lr", default=0.001)
parser.add_argument("--heuristic", default="bald", type=str)
parser.add_argument("--iterations", default=20, type=int)
parser.add_argument("--shuffle_prop", default=0.05, type=float)
parser.add_argument("--learning_epoch", default=20, type=int)
return parser.parse_args()
def get_datasets(initial_pool):
transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(30),
transforms.ToTensor(),
transforms.Normalize(3 * [0.5], 3 * [0.5]),
]
)
test_transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(3 * [0.5], 3 * [0.5]),
]
)
# Note: We use the test set here as an example. You should make your own validation set.
train_ds = datasets.CIFAR10(
".", train=True, transform=transform, target_transform=None, download=True
)
test_set = datasets.CIFAR10(
".", train=False, transform=test_transform, target_transform=None, download=True
)
active_set = ActiveLearningDataset(train_ds, pool_specifics={"transform": test_transform})
# We start labeling randomly.
active_set.label_randomly(initial_pool)
return active_set, test_set
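# A small, illustrative sketch (assumed behaviour, using only the API already used in
# this script): __len__ reports the number of labelled samples, so labelling more of
# the pool grows the training set seen by the model at the next active-learning step.
#
#   active_set, _ = get_datasets(initial_pool=100)
#   len(active_set)               # 100 labelled samples
#   active_set.label_randomly(50)
#   len(active_set)               # 150 labelled samples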
def main():
args = parse_args()
use_cuda = torch.cuda.is_available()
torch.backends.cudnn.benchmark = True
random.seed(1337)
torch.manual_seed(1337)
if not use_cuda:
print("warning, the experiments would take ages to run on cpu")
hyperparams = vars(args)
active_set, test_set = get_datasets(hyperparams["initial_pool"])
heuristic = get_heuristic(hyperparams["heuristic"], hyperparams["shuffle_prop"])
criterion = CrossEntropyLoss()
model = vgg16(pretrained=False, num_classes=10)
weights = load_state_dict_from_url("https://download.pytorch.org/models/vgg16-397923af.pth")
weights = {k: v for k, v in weights.items() if "classifier.6" not in k}
model.load_state_dict(weights, strict=False)
# change dropout layer to MCDropout
model = patch_module(model)
if use_cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"], momentum=0.9)
# Wraps the model into a usable API.
model = ModelWrapper(model, criterion)
logs = {}
logs["epoch"] = 0
# for prediction we use a smaller batchsize
# since it is slower
active_loop = ActiveLearningLoop(
active_set,
model.predict_on_dataset,
heuristic,
hyperparams.get("query_size", 1),
batch_size=10,
iterations=hyperparams["iterations"],
use_cuda=use_cuda,
)
# We will reset the weights at each active learning step.
init_weights = deepcopy(model.state_dict())
for epoch in tqdm(range(args.epoch)):
# Load the initial weights.
model.load_state_dict(init_weights)
model.train_on_dataset(
active_set,
optimizer,
hyperparams["batch_size"],
hyperparams["learning_epoch"],
use_cuda,
)
# Validation!
model.test_on_dataset(test_set, hyperparams["batch_size"], use_cuda)
metrics = model.metrics
should_continue = active_loop.step()
if not should_continue:
break
val_loss = metrics["test_loss"].value
logs = {
"val": val_loss,
"epoch": epoch,
"train": metrics["train_loss"].value,
"labeled_data": active_set.labelled,
"Next Training set size": len(active_set),
}
print(logs)
if __name__ == "__main__":
main()
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.hub.load_state_dict_from_url"
] | 1.10.0 | llv22/baal_tf2.4_mac | 6eed225f8b57e61d8d16b1868ea655384c566700 |
1.9 | import pytest
import torch
import torch.nn.functional as F
from lean_transformer.utils import pad_to_multiple, GELU
import numpy as np
@pytest.mark.forked
def test_pad_to_multiple():
x = torch.randn(3, 3)
assert pad_to_multiple(x, multiple=3, dims=0) is x
assert pad_to_multiple(x, multiple=3, dims=1) is x
assert pad_to_multiple(x, multiple=2, dims=1) is not x
assert pad_to_multiple(x, multiple=4, dims=1) is not x
assert torch.allclose(pad_to_multiple(x, multiple=2, dims=1), pad_to_multiple(x, multiple=4, dims=1))
assert pad_to_multiple(x, multiple=2, dims=0).shape == (4, 3)
assert pad_to_multiple(x, multiple=4, dims=1).shape == (3, 4)
assert pad_to_multiple(x, multiple=2, dims=[0, 1]).shape == (4, 4)
assert torch.allclose(pad_to_multiple(x, multiple=4, dims=1).sum(), x.sum())
assert pad_to_multiple(x, multiple=10, dims=0)[3:].norm() == 0
assert pad_to_multiple(x, multiple=4, dims=[0, 1]).shape == (4, 4)
assert pad_to_multiple(x, multiple=3, dims=[0, 1]) is x
@pytest.mark.forked
def test_gelu():
gelu_ours = GELU.apply(torch.linspace(-5, 5, 1000))
gelu_ref = F.gelu(torch.linspace(-5, 5, 1000))
    assert abs(gelu_ours - gelu_ref).max().item() <= 5e-4
| [
"torch.linspace",
"torch.randn"
] | 1.9.0 | krunt/lean_transformer | 90abdb87bb08566eaba0a45bc29ec6a3220333ac |
1.3 | import torch.nn as nn
from utils.builder import get_builder
from args import args
from collections import OrderedDict
# Binary activation function with gradient estimator
import torch
class F_BinAct(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
# Save input for backward
ctx.save_for_backward(inp)
# Unscaled sign function
return torch.sign(inp)
@staticmethod
def backward(ctx, grad_out):
# Get input from saved ctx
inp, = ctx.saved_tensors
# Clone grad_out
grad_input = grad_out.clone()
# Gradient approximation from quadratic spline
inp = torch.clamp(inp, min=-1.0, max=1.0)
inp = 2*(1 - torch.abs(inp))
# Return gradient
return grad_input * inp
class BiRealAct(nn.Module):
def __init__(self):
super(BiRealAct, self).__init__()
def forward(self, input):
return F_BinAct.apply(input)
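# A minimal, illustrative check (assumed example): the forward pass is a hard sign,
# while the backward pass uses the piecewise-quadratic surrogate gradient
# 2 * (1 - |x|), with x clamped to [-1, 1].
def _binact_gradient_example():
    x = torch.tensor([-2.0, -0.5, 0.25, 2.0], requires_grad=True)
    y = BiRealAct()(x)
    y.sum().backward()
    # y      -> [-1., -1.,  1.,  1.]
    # x.grad -> [ 0.,  1.0, 1.5,  0.]
    return y, x.grad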
# BasicBlock {{{
class BasicBlock(nn.Module):
M = 2
expansion = 1
def __init__(self, builder, inplanes, planes, stride=1, downsample=None, base_width=64):
super(BasicBlock, self).__init__()
if base_width / 64 > 1:
raise ValueError("Base width >64 does not work for BasicBlock")
self.conv1 = builder.conv3x3(inplanes, planes, stride)
self.bn1 = builder.batchnorm(planes)
self.relu = (lambda: BiRealAct())()
self.conv2 = builder.conv3x3(planes, planes)
self.bn2 = builder.batchnorm(planes, last_bn=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.bn1 is not None:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.bn2 is not None:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# BasicBlock }}}
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, builder, inplanes, planes, stride, groups, base_width, widen_factor, downsample=None):
""" Constructor
Args:
inplanes: input channel dimensionality
planes: output channel dimensionality
stride: conv stride. Replaces pooling layer.
groups: num of convolution groups.
base_width: base number of channels in each group.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
width_ratio = planes / (widen_factor * 64.)
D = groups * int(base_width * width_ratio)
self.conv1 = builder.conv1x1(inplanes, D)
self.bn1 = builder.batchnorm(D)
self.conv2 = builder.group_conv3x3(D, D, groups=groups)
self.bn2 = builder.batchnorm(D)
self.conv3 = builder.conv1x1(D, planes)
self.bn3 = builder.batchnorm(planes, last_bn=True)
self.relu = (lambda: BiRealAct())()
self.downsample = downsample
self.stride = stride
self.shortcut = nn.Sequential()
if inplanes != planes:
self.shortcut.add_module('shortcut_conv',
builder.conv1x1(inplanes, planes))
self.shortcut.add_module('shortcut_bn', builder.batchnorm(planes))
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# bottleneck = self.conv_reduce.forward(x)
# bottleneck = self.relu(self.bn_reduce.forward(bottleneck), inplace=True)
# bottleneck = self.conv_conv.forward(bottleneck)
# bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
# bottleneck = self.conv_expand.forward(bottleneck)
# bottleneck = self.bn_expand.forward(bottleneck)
# residual = self.shortcut.forward(x)
# return F.relu(residual + bottleneck, inplace=True)
class Bottleneck2(nn.Module):
M = 3
expansion = 4
def __init__(self, builder, inplanes, planes, groups, stride=1, downsample=None, base_width=64):
        super(Bottleneck2, self).__init__()
width = int(planes * base_width / 64)
self.conv1 = builder.conv1x1(inplanes, width)
self.bn1 = builder.batchnorm(width)
self.conv2 = builder.conv3x3(width, width, stride=stride)
self.bn2 = builder.batchnorm(width)
self.conv3 = builder.conv1x1(width, planes * self.expansion)
self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)
self.relu = (lambda: BiRealAct())()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# Bottleneck {{{
class Bottleneck(nn.Module):
M = 3
expansion = 4
def __init__(self, builder, inplanes, planes, stride=1, downsample=None, base_width=64):
super(Bottleneck, self).__init__()
width = int(planes * base_width / 64)
self.conv1 = builder.conv1x1(inplanes, width)
self.bn1 = builder.batchnorm(width)
self.conv2 = builder.conv3x3(width, width, stride=stride)
self.bn2 = builder.batchnorm(width)
self.conv3 = builder.conv1x1(width, planes * self.expansion)
self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)
self.relu = (lambda: BiRealAct())()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# Bottleneck }}}
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, builder, groups, layers, num_classes=1000, base_width=64, widen_factor=4):
""" Constructor
Args:
groups: number of convolution groups.
layers: number of layers.
num_classes: number of classes
base_width: base number of channels in each group.
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
        self.builder = builder
        self.groups = groups
self.layers = layers
self.block_depth = (self.layers - 2) // 9
self.base_width = base_width
if self.base_width // 64 > 1:
print(f"==> Using {self.base_width // 64}x wide model")
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.bn1 = builder.batchnorm(64)
self.relu = (lambda: BiRealAct())()
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], self.groups[0], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], self.groups[0], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], self.groups[0], 2)
self.classifier = nn.Linear(self.stages[3], num_classes)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, inplanes, planes, groups, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
inplanes: number of input channels
planes: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
                block.add_module(name_, ResNeXtBottleneck(self.builder, inplanes, planes, pool_stride,
                                                          self.groups, self.base_width, self.widen_factor))
            else:
                block.add_module(name_,
                                 ResNeXtBottleneck(self.builder, planes, planes, 1, self.groups,
                                                   self.base_width, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
x = self.stage_3.forward(x)
x = F.avg_pool2d(x, 8, 1)
x = x.view(-1, self.stages[3])
return self.classifier(x)
class BasicBlock_C(nn.Module):
"""
increasing groups is a more effective way of
gaining accuracy than going deeper or wider
"""
def __init__(self, builder, inplanes, bottleneck_width=4, groups=32, stride=1, expansion=2):
super(BasicBlock_C, self).__init__()
inner_width = groups * bottleneck_width
width = int(inplanes * bottleneck_width / 64)
width_ratio = inplanes / (expansion * 64.)
D = groups * int(bottleneck_width * width_ratio)
self.expansion = expansion
self.relu = (lambda: BiRealAct())()
self.basic = nn.Sequential(OrderedDict(
[
('conv1_0', builder.conv1x1(inplanes, inner_width, stride)),
('bn1', builder.batchnorm(inner_width)),
('act0', (lambda: BiRealAct())()),
('conv3_0', builder.group_conv3x3(inner_width, inner_width, groups=groups, stride=stride)),
('bn2', builder.batchnorm(inner_width)),
('act1', (lambda: BiRealAct())()),
('conv1_1', builder.conv1x1(inner_width, inner_width * self.expansion)),
('bn3', builder.batchnorm(inner_width * self.expansion))]))
self.shortcut = nn.Sequential()
if stride != 1 or inplanes != inner_width * self.expansion:
self.shortcut = nn.Sequential(
builder.conv1x1(inplanes, inner_width * self.expansion)
)
self.bn0 = builder.batchnorm(self.expansion * inner_width)
def forward(self, x):
out = self.basic(x)
out += self.shortcut(x)
out = self.relu(self.bn0(out))
return out
class ResNeXt_BinAct(nn.Module):
def __init__(self, builder, layers, groups, bottleneck_width=64, expansion=2, num_classes=10):
super(ResNeXt_BinAct, self).__init__()
self.groups = groups
self.bottleneck_width = bottleneck_width
self.inplanes = 64
self.expansion = expansion
# self.conv0 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1, padding=1)
# self.bn0 = nn.BatchNorm2d(self.in_planes)
# self.pool0 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# self.layer1=self._make_layer(num_blocks[0],1)
# self.layer2=self._make_layer(num_blocks[1],2)
# self.layer3=self._make_layer(num_blocks[2],2)
# self.layer4=self._make_layer(num_blocks[3],2)
# self.linear = nn.Linear(self.groups * self.bottleneck_width, num_classes)
if args.first_layer_dense:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
else:
self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.bn1 = builder.batchnorm(64)
self.relu = (lambda: BiRealAct())()
self.layer1 = self._make_layer(builder, 64, layers[0])
self.layer2 = self._make_layer(builder, 64*(self.expansion+1), layers[1], stride=2)
self.layer3 = self._make_layer(builder, 128*(self.expansion+1), layers[2], stride=2)
self.layer4 = self._make_layer(builder, 256*(self.expansion+1), layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
if args.last_layer_dense:
self.fc = nn.Conv2d(512 * self.expansion, args.num_classes, 1)
else:
self.fc = builder.conv1x1(512 * self.expansion, num_classes)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
# out = self.pool0(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = self.fc(out)
out = out.view(out.size(0), -1)
return out
def _make_layer(self, builder, planes, num_blocks, stride=1):
downsample = None
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(BasicBlock_C(builder, planes, self.bottleneck_width, self.groups, stride, self.expansion))
self.inplanes = self.expansion * self.bottleneck_width * self.groups
self.bottleneck_width *= 2
return nn.Sequential(*layers)
# ResNet_BinAct {{{
class ResNet_BinAct(nn.Module):
def __init__(self, builder, block, layers, num_classes=1000, base_width=64):
self.inplanes = 64
super(ResNet_BinAct, self).__init__()
self.base_width = base_width
if self.base_width // 64 > 1:
print(f"==> Using {self.base_width // 64}x wide model")
if args.first_layer_dense:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
else:
self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.bn1 = builder.batchnorm(64)
self.relu = (lambda: BiRealAct())()
self.layer1 = self._make_layer(builder, block, 64, layers[0])
self.layer2 = self._make_layer(builder, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(builder, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(builder, block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
if args.last_layer_dense:
self.fc = nn.Conv2d(512 * block.expansion, args.num_classes, 1)
else:
self.fc = builder.conv1x1(512 * block.expansion, num_classes)
def _make_layer(self, builder, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
dconv = builder.conv1x1(
self.inplanes, planes * block.expansion, stride=stride
)
dbn = builder.batchnorm(planes * block.expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
layers.append(block(builder, self.inplanes, planes, stride, downsample, base_width=self.base_width))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(builder, self.inplanes, planes, base_width=self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.maxpool(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
# ResNet_BinAct }}}
# WideResNet_BinAct {{{
class WideResNet_BinAct(nn.Module):
def __init__(self, builder, block, layers, num_classes=1000, base_width=64, widen_factor=1):
self.inplanes = 64
super(WideResNet_BinAct, self).__init__()
self.base_width = base_width
if self.base_width // 64 > 1:
print(f"==> Using {self.base_width // 64}x wide model")
if args.first_layer_dense:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
else:
self.conv1 = builder.conv7x7(3, 64, stride=2, first_layer=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.bn1 = builder.batchnorm(64)
self.relu = (lambda: BiRealAct())()
self.layer1 = self._make_layer(builder, block, 64, layers[0])
self.layer2 = self._make_layer(builder, block, 64*(widen_factor+1), layers[1], stride=2)
self.layer3 = self._make_layer(builder, block, 128*(widen_factor+1), layers[2], stride=2)
self.layer4 = self._make_layer(builder, block, 256*(widen_factor+1), layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
if args.last_layer_dense:
self.fc = nn.Conv2d(256*(widen_factor+1) * block.expansion, args.num_classes, 1)
else:
self.fc = builder.conv1x1(256*(widen_factor+1) * block.expansion, num_classes)
def _make_layer(self, builder, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
dconv = builder.conv1x1(
self.inplanes, planes * block.expansion, stride=stride
)
dbn = builder.batchnorm(planes * block.expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
layers.append(block(builder, self.inplanes, planes, stride, downsample, base_width=self.base_width))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(builder, self.inplanes, planes, base_width=self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.maxpool(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
# WideResNet_BinAct }}}
# Imagenet Networks
def ResNet18_BinAct(pretrained=False):
return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000)
def ResNet34_BinAct(pretrained=False):
return ResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000)
def ResNet50_BinAct(pretrained=False):
return ResNet_BinAct(get_builder(), Bottleneck, [3, 4, 6, 3], 1000)
def ResNet101_BinAct(pretrained=False):
return ResNet_BinAct(get_builder(), Bottleneck, [3, 4, 23, 3], 1000)
def WideResNet18_2_BinAct(pretrained=False):
return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000, widen_factor=2)
def WideResNet18_3_BinAct(pretrained=False):
return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 1000, widen_factor=2.5)
def WideResNet34_2_BinAct(pretrained=False):
return WideResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000, widen_factor=2)
def WideResNet34_3_BinAct(pretrained=False):
return WideResNet_BinAct(get_builder(), BasicBlock, [3, 4, 6, 3], 1000, widen_factor=3)
def WideResNet50_2_BinAct(pretrained=False):
return ResNet_BinAct(
get_builder(), Bottleneck, [3, 4, 6, 3], num_classes=1000, base_width=64 * 2
)
# CIFAR-10 Networks
def ResNext_BinAct(pretrained=False):
return ResNeXt_BinAct(get_builder(), [1, 2, 6, 2], groups=4, expansion=2)
def cifarResNet18_BinAct(pretrained=False):
return ResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10)
def cifarWideResNet18_2_BinAct(pretrained=False):
    return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10, widen_factor=2)
def cifarWideResNet18_3_BinAct(pretrained=False):
    return WideResNet_BinAct(get_builder(), BasicBlock, [2, 2, 2, 2], 10, widen_factor=3)
| [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.clamp",
"torch.sign",
"torch.abs",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.3.0 | zhanzheng8585/biprop | ce6a364c8323f102bd41ebb332e1e841ec78c79d |
1.1 | import math
import torch
from torch.optim.optimizer import Optimizer
from .types import Betas2, OptFloat, OptLossClosure, Params
__all__ = ('AdaMod',)
class AdaMod(Optimizer):
r"""Implements AccSGD algorithm.
It has been proposed in `Adaptive and Momental Bounds for Adaptive
Learning Rate Methods`__.
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing running averages of gradient
and its square (default: (0.9, 0.999))
beta3: smoothing coefficient for adaptive learning rates
            (default: 0.999)
eps: term added to the denominator to improve numerical stability
(default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.AdaMod(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/1910.12249
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
betas: Betas2 = (0.9, 0.999),
beta3: float = 0.999,
eps: float = 1e-8,
weight_decay: float = 0,
) -> None:
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= beta3 < 1.0:
raise ValueError(f'Invalid beta3 parameter: {beta3}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(
lr=lr, betas=betas, beta3=beta3, eps=eps, weight_decay=weight_decay
)
super(AdaMod, self).__init__(params, defaults)
def step(self, closure: OptLossClosure = None) -> OptFloat:
"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
msg = 'AdaMod does not support sparse gradients'
raise RuntimeError(msg)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
# Exponential moving average of actual learning rates
state['exp_avg_lr'] = torch.zeros_like(p)
exp_avg, exp_avg_sq, exp_avg_lr = (
state['exp_avg'],
state['exp_avg_sq'],
state['exp_avg_lr'],
)
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = (
group['lr']
* math.sqrt(bias_correction2)
/ bias_correction1
)
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * group['lr'], p.data)
# Applies momental bounds on actual learning rates
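                # Per element: eta_hat = step_size / denom; the running average
                # s_t = beta3 * s_{t-1} + (1 - beta3) * eta_hat is kept in exp_avg_lr,
                # and the applied rate is min(eta_hat, s_t), which clips spikes in the
                # adaptive learning rate.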
step_size = torch.full_like(denom, step_size)
step_size.div_(denom)
exp_avg_lr.mul_(group['beta3']).add_(
1 - group['beta3'], step_size
)
step_size = torch.min(step_size, exp_avg_lr)
step_size.mul_(exp_avg)
p.data.add_(-step_size)
return loss
| [
"torch.zeros_like",
"torch.min",
"torch.full_like"
] | 1.1.0 | tkon3/pytorch-optimizer | e5578453b79143331c30fd76b08721b45dce86d3 |
1.7 | """
Implement torch iterable dataset
- build vocab ordered by freq for
"""
from tqdm import tqdm
import torch
import torch.utils.data
from torch.utils.data.dataloader import DataLoader
import os
import sys
import pickle5 as pickle #import pickle
import math
from collections import defaultdict
SPLITS = ['train', 'valid', 'test']
EOS = '<eos>'
PAD = '<pad>'
class Dataset(torch.utils.data.IterableDataset):
def __init__(self, data_dir, batch_size, split):
self.data_dir = data_dir
if not self.data_exist():
self.build_vocab()
for s in SPLITS:
self.binarize(s)
self.load_vocab()
self.data = self.load_data(split, batch_size) # bsz x (len(data)/bsz)
self.start = 0
self.end = self.data.size(1)
self.split = split
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
iter_start = self.start
iter_end = self.end
else: # in a worker process split workload
per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
worker_id = worker_info.id
iter_start = self.start + worker_id * per_worker
iter_end = min(iter_start + per_worker, self.end)
return iter(self.data.transpose(1,0)[iter_start:iter_end])
@property
def eos_idx(self):
return self.tok2id[EOS]
@property
def padding_idx(self):
return self.tok2id[PAD]
@property
def size(self):
return len(self.id2tok)
def build_vocab(self, min_freq=0, max_freq=sys.maxsize):
"""
build vocab + add eos
encode sentence
"""
with open(os.path.join(self.data_dir, 'train.txt'), 'r') as fn:
data = fn.readlines()
if 'lambada' in self.data_dir:
with open(os.path.join(self.data_dir, 'test.txt'), 'r') as fn:
data.extend(fn.readlines())
with open(os.path.join(self.data_dir, 'valid.txt'), 'r') as fn:
data.extend(fn.readlines())
print('building vocab ...')
self.vocab = defaultdict(int)
self.tok2id = {}
self.id2tok = []
for line in tqdm(data):
line = line.strip().split()
for tok in line:
self.vocab[tok] += 1
self.vocab = {a : self.vocab[a] for a in self.vocab if self.vocab[a] >= min_freq and self.vocab[a] <= max_freq}
# sort vocab in case of using adaptive softmax
self.vocab = list(sorted(self.vocab.items(), key=lambda a: a[1], reverse=True))
print(self.vocab[:10])
if 'lambada' in self.data_dir:
self.vocab = self.vocab[:60000]
self.vocab.append(('<unk>', 0))
self.id2tok = ['<pad>'] + ['<eos>'] + [a[0] for a in self.vocab]
self.tok2id = {a : i for i, a in enumerate(self.id2tok)}
self.vocab_size = len(self.id2tok)
print('end building vocab ...')
print('vocab size', len(self.tok2id))
with open(os.path.join(self.data_dir, 'vocab.pkl'), 'wb') as fn:
pickle.dump({'id2tok': self.id2tok, 'tok2id': self.tok2id, 'vocab_size':self.vocab_size}, fn)
def encode_line(self, line):
if 'lambada' not in self.data_dir:
return torch.tensor([self.tok2id[tok] for tok in line+['<eos>']])
else:
return torch.tensor([self.tok2id[tok] if tok in self.tok2id else self.tok2id['<unk>'] for tok in line])
def decode_tokids(self, tensor):
tokens = []
for tokid in tensor:
tokens.append(self.id2tok[tokid])
tokens = [t if t != '<eos>' else '\n' for t in tokens]
return ' '.join(tokens)
def binarize(self, split):
"""binarize data to torch.tensor shape (doc_len, )"""
with open(os.path.join(self.data_dir, f"{split}.txt"), "r") as fn:
data = [line.strip().split() for line in fn.readlines()]
print('binarizing data ...')
doc = []
for line in tqdm(data):
            if line:  # `line` is already a token list; skip empty lines
doc.append(self.encode_line(line))
doc = torch.cat(doc)
print('end binarizing data ...')
print('doc shape', doc.shape)
print([self.id2tok[i] for i in doc[:100]])
with open(os.path.join(self.data_dir, f"{split}.bin"), "wb") as fout:
pickle.dump({"data": doc}, fout, protocol=pickle.HIGHEST_PROTOCOL)
def load_vocab(self):
with open(os.path.join(self.data_dir, 'vocab.pkl'), 'rb') as fn:
data = pickle.load(fn)
print('loading vocab...')
self.id2tok = data['id2tok']
self.tok2id = data['tok2id']
self.vocab_size = data['vocab_size']
# self.id2freq = data['id2freq']
print(f'vocab size {self.vocab_size}')
def data_exist(self):
return all([os.path.exists(os.path.join(self.data_dir, f"{fn}.bin")) \
for fn in ['train', 'valid', 'test'] ] + [os.path.exists(os.path.join(self.data_dir, "vocab.pkl"))])
def load_data(self, split, bsz):
with open(os.path.join(self.data_dir, f"{split}.bin"), "rb") as fin:
data = pickle.load(fin)['data']
nstep = data.size(0) // bsz
return data[ : nstep * bsz].view(bsz, -1)
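# Minimal illustrative sketch (not part of the dataset class) of the reshaping done in
# load_data: a flat token stream of length N is trimmed to (N // bsz) * bsz tokens and
# viewed as (bsz, nstep), so each row holds a contiguous slice of the corpus.
def _demo_batchify(bsz=4):
    stream = torch.arange(10)        # pretend token ids 0..9
    nstep = stream.size(0) // bsz    # 2 steps per row
    return stream[: nstep * bsz].view(bsz, -1)
    # -> tensor([[0, 1], [2, 3], [4, 5], [6, 7]]); trailing tokens 8 and 9 are dropped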
| [
"torch.cat",
"torch.utils.data.get_worker_info",
"torch.tensor"
] | 1.7.1 | SimengSun/revisit-nplm | bbe1cdaecf1d7d104d27b1035a591ebbd3b5141e |
1.4 | import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.core import load
from librosa.util import normalize
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def load_wav(full_path, sampling_rate=None):
if os.path.splitext(full_path)[1] != '.npy':
data, sampling_rate = load(full_path, sr=sampling_rate)
else:
a = np.load(full_path, allow_pickle=True).item()
assert sampling_rate == a['rate']
data = a['array']
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
    if str(fmax)+'_'+str(y.device) not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
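# Minimal illustrative sketch (values are arbitrary, not from any training config): with
# center=False and the reflect padding above, a (B, T) waveform yields a mel spectrogram
# of shape (B, num_mels, T // hop_size) whenever hop_size divides T.
def _demo_mel_shapes():
    wav = torch.rand(1, 8192) * 2 - 1  # (batch, samples), values in [-1, 1)
    mel = mel_spectrogram(wav, n_fft=1024, num_mels=80, sampling_rate=22050,
                          hop_size=256, win_size=1024, fmin=0, fmax=8000)
    return mel.shape  # torch.Size([1, 80, 32])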
def get_dataset_filelist(a):
ext = '.wav' if not a.input_wavs_npy else '.npy'
with open(a.input_training_file, 'r', encoding='utf-8') as fi:
training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)
for x in fi.read().split('\n') if len(x) > 0]
with open(a.input_validation_file, 'r', encoding='utf-8') as fi:
validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + ext)
for x in fi.read().split('\n') if len(x) > 0]
return training_files, validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
def __getitem__(self, index):
filename = self.audio_files[index]
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename, self.sampling_rate)
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
| [
"torch.min",
"torch.max",
"torch.hann_window",
"torch.FloatTensor",
"torch.clamp",
"torch.from_numpy",
"torch.exp"
] | 1.4.0 | Hiroshiba/hifi-gan | 17601a07573309ee305c58bf87a041f267b1c0c8 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of Petuum, Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
import numpy as np
import torch
from torch.autograd import Variable
import torch.distributed as dist
from torch.optim import SGD, Optimizer
if TYPE_CHECKING: # pragma: no cover
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class AdaScale(Optimizer):
"""
Implements the AdaScale_ algorithm for scaling the learning rate for
distributed and large batch size training. Can be used in combination with
``torch.nn.parallel.DistributedDataParallel`` and ``torch.optim.SGD``.
.. _AdaScale: https://proceedings.icml.cc/static/paper_files/icml/2020/4682-Supplemental.pdf
This class subclasses `Optimizer` so that `torch.optim.lr_scheduler` can
    work with it. In other words, AdaScale is intended to be a complete wrapper of a
    torch Optimizer.
    Note that AdaScale does *not* help increase per-GPU batch size.
There are several ways to integrate AdaScale with your training loop.
We show two examples below.
Example 1: using PyTorch's `lr_scheduler` classes.
.. code-block:: python
optim = AdaScale(SGD(model.parameters(), lr=0.001))
model = DistributedDataParallel(model)
scheduler = LambdaLR(optim, lr_lambda=...)
last_epoch = 0
done = False
step = 0
while not done:
for batch in dataset:
optim.zero_grad()
logits = model()
loss = criterion(logits, ...)
loss.backward()
step += optim.gain()
optim.step()
epoch = step // len(dataset)
if epoch > last_epoch:
scheduler.step()
last_epoch = epoch
if epoch >= MAX_EPOCHS:
done = True
Example 2: using a custom `update_lr()` function that update the learning
rate based on the current step count per epoch.
.. code-block:: python
optim = AdaScale(SGD(model.parameters(), lr=0.001))
model = DistributedDataParallel(model)
step = 0
while step < max_steps:
for batch in ...:
optim.zero_grad()
logits = model()
loss = criterion()
loss.backward()
step += optim.gain()
optim.step()
update_lr(step)
Args:
optimizer (torch.optim.Optimizer):
Optimizer to apply AdaScale to.
world_size (int):
Number of world_size for distributed training.
If None, defaults to ``dist.get_world_size()``.
scale (float):
            Scaling factor of the batch size relative to a baseline scale of 1, e.g. using a 10x
larger batch size (summed across all ranks with gradient accumulation)
means a scale of 10.
If None, defaults to ``world_size * num_gradients_to_accumulate``.
smoothing (float):
Smoothing factor for moving average.
If None, it defaults to ``max(1 - (world_size * num_gradients_to_accumulate)/1000, 0)``.
            Note, for very high scale training, a higher smoothing value might be needed,
            especially at the beginning of training. Therefore, if your scale is close to or larger
            than 1000, try experimenting with a smoothing value > 0 if the final accuracy is poor.
num_gradients_to_accumulate (int):
Number of passes that we accumulate gradients locally
between each optimizer step. This can be changed during
training as long as the train loop changes gradient accumulation
accordingly.
Default to 1, which does not accumulate gradients.
debias_ewma (bool):
            (experimental) Use a debiased exponential moving average
            for smoothing and the mu and sigma variables. False will
            use the method in the paper's Appendix B.3.
            Default: True, which is what has been validated so far.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
world_size: Optional[int] = None,
scale: Optional[float] = None,
smoothing: float = None,
num_gradients_to_accumulate: int = 1,
debias_ewma: bool = True,
):
self._optimizer = optimizer
self._local_grad_sqr: Optional[torch.Tensor] = None
self._world_size: int = (
world_size if world_size is not None else dist.get_world_size() if dist.is_initialized() else 1
)
self._num_backward_calls = 0
self._last_final_backward_call = 0
self._num_grads_to_accum = num_gradients_to_accumulate
self._debias_ewma = debias_ewma
# Proxy the param_groups so that `torch.optim.lr_scheduler` can work.
self.param_groups = self._optimizer.param_groups
self.set_num_gradients_to_accumulate(num_gradients_to_accumulate, update_smoothing=True)
# The previous function call sets smoothing to its default value.
# Override that here if smoothing was passed as an argument.
if smoothing is not None:
self._smoothing = smoothing
if self._world_size * self._num_grads_to_accum <= 1:
# gain will be NaN since we will be dividing by zero in paper's B.3 where (S-1) == 0.
raise RuntimeError("AdaScale does not support a single worker without grad accumulation.")
# Per-param-group sqr & var states (sigma^2 & mu^2 in the paper).
self._optimizer.state.setdefault(
"adascale",
{
"grad_sqr_avg": np.ones(len(optimizer.param_groups)),
"grad_var_avg": np.zeros(len(optimizer.param_groups)),
},
)
self._scale = 1.0 # Assign to inform mypy about the typing of this variable.
self.set_scale(self._world_size * self._num_grads_to_accum if scale is None else scale)
self._hook_handles: List[Any] = []
self._hook()
def _hook(self) -> None:
""" Internal function to register the gradient hooks.
        Note, don't assume every parameter will generate a gradient (i.e. trigger the hook)
in every backward pass, which is the reason that we have ``find_unused_params`` flag
in the DDP class in ``torch.nn.parallel``.
"""
assert self._hook_handles == [], "Must run unhook first"
for idx, param_group in enumerate(self._optimizer.param_groups):
for param in param_group["params"]:
h = param.register_hook(functools.partial(self._backward_hook, idx))
self._hook_handles.append(h)
def __del__(self) -> None:
""" Unhook in case caller forgets to call unhook.
        This however may not "work" since there would be a circular reference
        between the hook objects and this object. In that case, neither will
        get GC'ed. Call unhook explicitly if you really want to delete
AdaScale from memory.
"""
self.unhook()
def unhook(self) -> None:
""" Unregister hook handles.
This is public because caller may need to call this to ensure all GPU
memory are released. Otherwise, the hook may prevent parameters from being
released from the GPU memory pool.
Internally, we use this to support ``add_param_group()`` API.
"""
for h in self._hook_handles:
h.remove()
self._hook_handles = []
@property
def _state(self) -> Dict[str, np.ndarray]:
"""
Return the states of AdaScale.
"""
return self._optimizer.state["adascale"]
@property
def scale(self) -> float:
"""
The scaling factor of the current batch size, relative to the baseline
batch size, which could be a DDP training. For example, if the
baseline batch size is 32 on 2 GPUs, but using a scaled-up batch size
        of 80 on 4 GPUs, then the scaling factor is 80 * 4 / 32 / 2 = 5.
        This API is exposed mainly for logging purposes. Note, this is different
from ``self.gain()``.
Returns:
(float):
The current scaling factor.
"""
return self._scale
@property
def smoothing(self) -> float:
"""
The smoothing constant used in exponentially-weighted moving average
tracking the gradient norm mean and variance within AdaScale.
        This API is exposed since the value is computed and the caller may
want to obtain this value and log it.
Returns:
(float):
The current smoothing value.
"""
return self._smoothing
def set_scale(self, scale: float, update_estimate: bool = True) -> None:
"""
Set the scaling factor of the current batch size. It is up to the
application to invoke this function to make sure that AdaScale's
scaling factor matches the actual batch size used during training.
Args:
scale (float):
New scaling factor to be applied to AdaScale.
update_estimate (bool):
                Whether to update the scale-dependent estimate of gradient
variance; this is highly recommended. (default: True)
"""
assert self._local_grad_sqr is None, "Don't change scale in backward phase"
assert scale >= 1, "Scale must be at least 1"
if update_estimate and hasattr(self, "_scale"):
assert self._scale >= 1, "bug: old scale isn't valid"
# Rescale grad_var_avg to account for the change in scale
if self._debias_ewma and "grad_var_avg_biased" in self._state:
self._state["grad_var_avg_biased"] *= self._scale / scale
elif "grad_var_avg_total" in self._state: # _debias_ewma==False
self._state["grad_var_avg_total"] *= self._scale / scale
self._state["grad_var_avg"] *= self._scale / scale
self._scale = scale
def _grad_sqr_avg(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the squared l2-norm of the true gradient
(sigma squared in the AdaScale paper).
Args:
pg_idx (Optional[int]):
Optional index for a parameter group.
Returns:
(float):
Estimate of squared l2-norm.
"""
if pg_idx is not None:
return self._state["grad_sqr_avg"][pg_idx]
else:
return float(np.sum(self._state["grad_sqr_avg"]))
def _grad_var_avg(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the trace of the covariance of the true gradient
(mu squared in the AdaScale paper).
Args:
pg_idx (Optional[int]):
Optional index for a parameter group.
Returns:
(float):
Estimate of trace of the covariance.
"""
if pg_idx is not None:
return self._state["grad_var_avg"][pg_idx]
else:
return float(np.sum(self._state["grad_var_avg"]))
def gain(self, pg_idx: Optional[int] = None) -> float:
"""
Current estimate of the AdaScale gain ratio (r_t in the paper).
Args:
pg_idx (int):
Optional index of a parameter group.
Default None: returns "averaged" gain for all groups.
Returns:
(float):
Estimate of gain ratio.
"""
var = self._grad_var_avg(pg_idx)
sqr = self._grad_sqr_avg(pg_idx)
gain = (var + sqr) / (var / self.scale + sqr)
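        # Worked example: with scale S = 8 and equal estimates (var == sqr), the gain is
        # (var + sqr) / (var / 8 + sqr) = 2 / (1/8 + 1) = 16/9 ≈ 1.78, i.e. the 8x larger
        # batch only warrants a ~1.78x larger effective step, not an 8x one.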
return gain
def _update_avg(self, name: str, value: np.ndarray, factor: float) -> None:
if self._debias_ewma:
# This function computes and stores the moving average of a vector
# using a smoothing factor.
biased = self._state.get(name + "_biased", np.zeros(value.shape[0]))
unbias = self._state.get(name + "_unbias", np.zeros(value.shape[0]))
biased = factor * biased + (1.0 - factor) * value
unbias = factor * unbias + (1.0 - factor)
self._state[name + "_biased"] = biased
self._state[name + "_unbias"] = unbias
self._state[name] = biased / unbias
else:
# Moving average procedure described in Appendix B.3
# For iterations t < 1 / (1 - smoothing) define grad_var_avg
# and grad_sqr_avg as mean of the past samples. After that
# start using running average.
#
# Note: we only keep a single _count for all parameter groups.
# Ideally, it should be a vector and in case a PG is added
# after some iterations are done. But, then the if condition
# below will need to be a np.where. I leave this corner
# case to a future exercise.
count = self._state.get(name + "_count", np.zeros(1))
count[0] += 1
self._state[name + "_count"] = count
if count < 1 / (1 - self._smoothing):
total = self._state.get(name + "_total", None)
if total is None:
total = value
else:
total += value
self._state[name + "_total"] = total
self._state[name] = total / count
else:
self._state[name] = factor * self._state[name] + (1.0 - factor) * value
def _backward_hook(self, pg_idx: int, grad: torch.Tensor) -> None:
# This method should be invoked once for each parameter during the
# backward pass, before gradients are synchronized between world_size.
# Store the local gradient square sums in a vector.
# This vector is also used for error checking. Whenever it is not None,
# it means that we are in backward pass.
if self._local_grad_sqr is None:
self._local_grad_sqr = torch.zeros(
len(self._optimizer.param_groups), device=grad.device, requires_grad=False,
)
self._local_grad_sqr[pg_idx] += grad.pow(2).sum()
# Now, ensure we queue a callback at the end of the callback queue.
# This will fire after all gradient callbacks are done (esp. those
# queued by DDP.
self._final_callback_queued = False
Variable._execution_engine.queue_callback(self._queue_callback)
def _queue_callback(self) -> None:
# This method should be invoked after the entire backward pass. We want
# to make sure self._final_callback is invoked once, only after all
# gradients have been synchronized between each worker. However, the
# synchronization code in DistributedDataParallel is also done in a
# callback, which might not yet be executed. Therefore, we enqueue
# self._final_callback from this method, which should ensure it is
# invoked after the gradient synchronization callback.
if self._final_callback_queued:
return
self._final_callback_queued = True
Variable._execution_engine.queue_callback(self._final_callback)
def _final_callback(self) -> None:
# This method should be invoked once for each backward pass, after
# gradients have been synchronized between each worker, unless we
# are in gradient accumulation mode, where grads are not all_reduced
# between the GPUs.
self._final_callback_queued = False
assert isinstance(self._local_grad_sqr, torch.Tensor)
# Keep track of number of backward calls for gradient accumulation.
# TODO (min): this may not work with activation checkpointing when
# multiple backward calls happen in a big backward.
self._num_backward_calls += 1
# TODO (min, mike): We need to have a way to check that training loop & DDP
# is doing the right thing where the gradient is reduced
# in this backward pass.
# Longer term, we may compute the gain and then inform
# the training loop when it is a good time to step().
assert (
self._num_backward_calls - self._last_final_backward_call
) <= self._num_grads_to_accum, (
f"bug: {self._num_backward_calls} - {self._last_final_backward_call} should <= {self._num_grads_to_accum}"
)
if (self._num_backward_calls - self._last_final_backward_call) % self._num_grads_to_accum != 0:
assert self._local_grad_sqr is not None, "We should still be in backward phase"
return
# Since self._local_grad_sqr is FP32, sum shouldn't overflow.
# This vector has length of # of param_groups, so it is small, but we
# use async to hide the all_reduce latency, esp when # of nodes is large.
work = None
if self._world_size > 1:
work = dist.all_reduce(self._local_grad_sqr, async_op=True) # SUM
# Compute the sums of squares for reduced gradients.
# Divide by _num_grads_to_accum since the gradients are accumulated.
total_grad_sqr = np.array(
[sum(param.grad.pow(2).sum().item() for param in group["params"]) for group in self._optimizer.param_groups]
)
# Divide by (_num_grads_to_accum ** 2) to account for gradient
# accumulation.
if self._num_grads_to_accum > 1:
# np array doesn't support /=.
total_grad_sqr = total_grad_sqr / (self._num_grads_to_accum ** 2)
# Wait for all_reduce to be done and move it to cpu & np.
if work:
work.wait()
local_grad_sqr = self._local_grad_sqr.cpu().numpy()
# See appendix B.3 of the paper.
# Modified to handle cases where scale != world_size
#
# local_grad_sqr is \sum_{i=1}^{c N} \norm{g_t_i}^2
# where N is world size and c is num_grads_to_accum
# total_grad_sqr is \norm{\bar{g}_t}^2
S = self._scale
cN = self._world_size * self._num_grads_to_accum
grad_var = local_grad_sqr * (S / cN) / (cN - 1) - total_grad_sqr * S / (cN - 1)
grad_sqr = total_grad_sqr - grad_var / S
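        # When S == cN (the default scale), these reduce to the usual unbiased estimators:
        # grad_var = (local_grad_sqr / cN - total_grad_sqr) * cN / (cN - 1)
        # grad_sqr = total_grad_sqr - grad_var / cN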
grad_var = np.maximum(grad_var, 1e-6)
grad_sqr = np.maximum(grad_sqr, 0.0)
self._update_avg("grad_sqr_avg", grad_sqr, self.smoothing)
self._update_avg("grad_var_avg", grad_var, self.smoothing)
self._last_final_backward_call = self._num_backward_calls
# Indicating backward is done.
self._local_grad_sqr = None
def step(self, *args: Any, **kwargs: Any) -> Optional[float]:
"""
Run one optimizer step using Adascale. Essentially just invokes
``optimizer.step(*args, **kwargs)`` with a scaled learning rate.
.. note::
            It is possible that this function becomes a performance
bottleneck if you have frequent updates. To avoid that,
making bigger steps and reducing update frequency is generally
better for performance.
Args:
args (Any):
Positional arguments passed to ``optimizer.step``.
kwargs (Any):
Keyword arguments passed to ``optimizer.step``.
Returns:
(Tensor):
The loss tensor if a closure if used to re-evaluate the model.
"""
assert self._local_grad_sqr is None, "Don't step without finishing backward phase"
# Set original LR and set new LR.
original_lr = []
for idx, param_group in enumerate(self._optimizer.param_groups):
original_lr.append(param_group["lr"])
param_group["lr"] = self.gain(pg_idx=idx) * param_group["lr"]
# Step it.
res = self._optimizer.step(*args, **kwargs)
# Restore the original LR.
for lr, param_group in zip(original_lr, self._optimizer.param_groups):
param_group["lr"] = lr
return res
def add_param_group(self, pg: Dict) -> None:
""" Support adding parameter groups
We need to re-size some of the state and re-register the backward hooks.
"""
assert self._local_grad_sqr is None, "Can't add parameter group during backward"
self._optimizer.add_param_group(pg)
# Update the hooks.
self.unhook()
self._hook()
# Extend the states.
for name in self._state.keys():
assert name.startswith("grad_sqr_avg") or name.startswith("grad_var_avg"), name
if name.endswith("_count"):
# This is the "_count" variable, should be a 1D int.
assert self._state[name].shape == (1,), self._state[name].shape
continue
# must be a np array, extend it with the right value and check the shape.
val = 1 if name == "grad_sqr_avg" else 0
self._state[name] = np.append(self._state[name], val)
assert self._state[name].shape == (len(self._optimizer.param_groups),)
def zero_grad(self) -> None:
"""Proxy function to optimizer, because some training loops need this."""
assert self._local_grad_sqr is None, "Don't zero_grad in backward"
return self._optimizer.zero_grad()
def state_dict(self) -> Dict:
""" Proxy function to optimizer, checkpointing needs this.
.. note::
Do NOT checkpoint in the middle of gradient accumulation since
associated AdaScale internal states are not saved in the checkpoint.
"""
assert self._local_grad_sqr is None, "Don't checkpoint in backward"
return self._optimizer.state_dict()
def load_state_dict(self, data: Dict) -> None:
""" Proxy function to optimizer, checkpointing needs this.
.. note::
Do NOT checkpoint in the middle of gradient accumulation since
associated AdaScale internal states are not saved in the checkpoint.
"""
assert self._local_grad_sqr is None, "Don't load checkpoint in backward"
return self._optimizer.load_state_dict(data)
def set_num_gradients_to_accumulate(self, num_gradients_to_accumulate: int, update_smoothing: bool = True,) -> None:
"""Set the number of gradients to accumulate to a new value.
This is experimental. This could be called while training so that
        we can gradually increase the steps between updates. Almost always,
`set_scale` needs to be called to update the scale as well.
        TODO (min): need a way of determining how much to increase the step size?
        TODO (min): having both `set_scale` and `set_num_gradients_to_accumulate`
                    is hard to use and easy to get wrong. I think it is better
                    to specify a `base_scale`. But more discussion is
needed here.
Args:
num_gradients_to_accumulate (int):
Number of gradients to accumulate (calls to backward) between
each optimizer step
update_smoothing (bool):
Whether to update smoothing factor or not. Default: True.
"""
assert self._local_grad_sqr is None, "Don't change num_grad_to_accum in backward"
assert num_gradients_to_accumulate >= 1, f"Invalid value {num_gradients_to_accumulate}"
self._num_grads_to_accum = num_gradients_to_accumulate
if update_smoothing:
# Set smoothing based on effective world_size rather than scale here,
# since world_size determines the number of samples being averaged over
# at every update.
#
# When effective world size is large enough, smoothing is probably
# not needed, so the smoothing factor is 0.
self._smoothing = max(1 - self._world_size * self._num_grads_to_accum / 1000, 0)
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped optimizer."""
try:
return super().__getattr__(name) # defer to Optimizer logic
except AttributeError:
return getattr(self._optimizer, name) # fallback to wrapped optim
class AdaScaleWrapper(AdaScale):
"""
A thin wrapper for AdaScale so that the constructor resembles a
standard optimizer. This allows it to work with other Optimizer
Wrappers, like `OSS`.
    .. warning::
        OSS(AdaScaleWrapper) (i.e. OSS wrapping AdaScale) results in each
        rank's AdaScale operating on a different set of parameters. They
will get different gain values and it is unclear how to adjust
effective step size in that case. We have not validated effectiveness
or benefit in this case.
OTOH, AdaScale(OSS) (i.e. AdaScale wrapping OSS) is recommended
and is numerically identical to AdaScale without OSS. Since
AdaScale doesn't incur per-parameter state, the memory benefit
of OSS is still the same.
Args:
params (list of tensors):
parameters to be optimized
optim (class subtyping torch.optim.Optimizer):
            an optimizer class to be wrapped.
additional_optim_args (argument dict):
            keyword arguments to the `optim` class above.
        The remaining params are in sync with the `AdaScale` class above.
"""
def __init__(
self,
params: _params_t,
world_size: Optional[int] = None,
scale: Optional[float] = None,
smoothing: float = None,
num_gradients_to_accumulate: int = 1,
debias_ewma: bool = True,
optim_cls: Type[Optimizer] = SGD,
**additional_optim_args: Any,
):
optim_obj = optim_cls(params, **additional_optim_args)
super().__init__(optim_obj, world_size, scale, smoothing, num_gradients_to_accumulate, debias_ewma)
| [
"torch.distributed.get_world_size",
"torch.autograd.Variable._execution_engine.queue_callback",
"torch.distributed.is_initialized",
"torch.distributed.all_reduce"
] | 1.6.0 | ncilfone/fairscale | b434b7354898febf718f23c7ff21368a6e0bbe1a |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test checkpoint_wrapper with normalization layers. """
import pytest
import torch
from torch.nn import BatchNorm2d, LayerNorm, Linear, Sequential
from torch.optim import SGD
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from fairscale.utils.testing import objects_are_equal, torch_version
NORM_TYPES = [LayerNorm, BatchNorm2d]
MP_TYPES = ["fp32", "fp16", "call_half"]
def get_model(norm_type, checkpointed, mixed_precision):
assert norm_type in NORM_TYPES, norm_type
assert checkpointed in [True, False], checkpointed
assert mixed_precision in MP_TYPES
model = Sequential(Linear(3, 2), norm_type(2))
if mixed_precision == "fp16":
# Set param.data and buffers as fp16
for p in model.parameters():
p.data = p.data.half()
for m in model:
for n, b in m.named_buffers():
setattr(m, n, b.half())
elif mixed_precision == "call_half":
model.half()
if checkpointed:
model = checkpoint_wrapper(model)
return model
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("norm_type", NORM_TYPES)
@pytest.mark.parametrize("mixed_precision", MP_TYPES)
def test_norm(device, norm_type, mixed_precision):
"""Test checkpoint_wrapper with different norm layers."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("Skip due to lack of GPU")
# Get input, ref, checkpoint models and make them equal.
in_data = torch.rand(2, 2, 3, 3).to(device)
m_ref = get_model(norm_type, False, mixed_precision).to(device)
m_cpt = get_model(norm_type, True, mixed_precision).to(device)
m_cpt.load_state_dict(m_ref.state_dict())
if torch_version() >= (1, 6, 0):
# This assert fails on 1.5.1.
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
if mixed_precision != "fp32":
in_data = in_data.half()
# Needed due to checkpointing.
in_data.requires_grad = True
for model in (m_ref, m_cpt):
optim = SGD(model.parameters(), lr=0.1)
if device == "cpu" and mixed_precision != "fp32":
# Got: RuntimeError: "batch_norm"/"layer_norm" not implemented for 'Half'.
with pytest.raises(RuntimeError):
out = model(in_data)
return
else:
# Everything else work.
out = model(in_data)
out.sum().backward()
optim.step()
if torch_version() >= (1, 6, 0):
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
| [
"torch.nn.Linear",
"torch.rand",
"torch.cuda.is_available"
] | 1.6.0 | ncilfone/fairscale | b434b7354898febf718f23c7ff21368a6e0bbe1a |
1.1 | import pickle
import numpy as np
import torch
from detection.src.yolov3.utils.datasets import ListDataset
class DetectionSetDataManager():
"""
Data Manager used for YOLOMAML
"""
def __init__(self, n_way, n_support, n_query, n_episode, image_size):
"""
Args:
n_way (int): number of different classes in a detection class
n_support (int): number of images in the support set with an instance of one class,
for each of the n_way classes
n_query (int): number of images in the query set with an instance of one class,
for each of the n_way classes
n_episode (int): number of episodes per epoch
image_size (int): size of images (square)
"""
self.n_way = n_way
self.n_support = n_support
self.n_query = n_query
self.n_episode = n_episode
self.image_size = image_size
def get_data_loader(self, path_to_data_file, path_to_images_per_label=None):
"""
Args:
path_to_data_file (str): path to file containing paths to images
path_to_images_per_label (str): path to pkl file containing images_per_label dictionary (optional)
Returns:
DataLoader: samples data in the shape of a detection task
"""
dataset = ListDataset(path_to_data_file, img_size=self.image_size)
sampler = DetectionTaskSampler(
dataset,
self.n_way,
self.n_support,
self.n_query,
self.n_episode,
path_to_images_per_label,
)
data_loader = torch.utils.data.DataLoader(dataset,
batch_sampler=sampler,
num_workers=12,
collate_fn=dataset.collate_fn_episodic,
)
return data_loader
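# A minimal usage sketch (illustrative assumption, not part of the original module): building
# an episodic loader from a list file of image paths. The file path and the episode shape
# below are hypothetical values chosen for illustration only.
#
#   data_manager = DetectionSetDataManager(n_way=3, n_support=2, n_query=3,
#                                          n_episode=100, image_size=416)
#   loader = data_manager.get_data_loader("data/train_paths.txt")
#   for episode in loader:
#       ...  # each iteration yields one episode built from n_way * (n_support + n_query) images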
def create_dict_images_per_label(data_source):
"""
Compute and returns dictionary of images per label
Args:
data_source (ListDataset) : The data set containing the images
Returns:
        dict: each key maps to a list of the images that contain at least one target whose label is the key
"""
    images_per_label = {}
for index in range(len(data_source)):
try:
targets = data_source[index][2]
if targets is not None:
for target in targets:
label = int(target[1])
if label not in images_per_label:
images_per_label[label] = []
if len(images_per_label[label]) == 0 or images_per_label[label][-1] != index:
images_per_label[label].append(index)
if index % 100 == 0:
print('{index}/{length_data_source} images considered'.format(
index=index,
length_data_source=len(data_source))
)
except OSError:
print('Corrupted image : {image_index}'.format(image_index=index))
return images_per_label
class DetectionTaskSampler(torch.utils.data.Sampler):
"""
Samples elements in detection episodes of defined shape.
"""
def __init__(self, data_source, n_way, n_support, n_query, n_episodes, path_to_images_per_label=None):
"""
Args:
data_source (ListDataset): source dataset
            n_way (int): number of different classes in a detection task
n_support (int): number of images in the support set with an instance of one class,
for each of the n_way classes
n_query (int): number of images in the query set with an instance of one class,
for each of the n_way classes
n_episodes (int): number of episodes per epoch
path_to_images_per_label (str): path to a pickle file containing a dictionary of images per label
"""
self.data_source = data_source
self.n_way = n_way
self.n_support = n_support
self.n_query = n_query
self.n_episodes = n_episodes
self.images_per_label = self.get_images_per_label(path_to_images_per_label)
self.label_list = self.get_label_list()
def get_images_per_label(self, path):
"""
Returns dictionary of images per label from a file if specified or compute it from scratch
Args:
path (str) : path to a pickle file containing a dictionary of images per label
Returns:
            dict: each key maps to a list of the images that contain at least one target whose label is the key
"""
if path:
with open(path, 'rb') as dictionary_file:
images_per_label = pickle.load(dictionary_file)
else:
images_per_label = create_dict_images_per_label(self.data_source)
return images_per_label
def get_label_list(self):
"""
Returns:
list: list of appropriate labels, i.e. labels that are present in at least n_support+n_query images
"""
label_list = []
for label in self.images_per_label:
if len(self.images_per_label[label]) >= self.n_support + self.n_query:
label_list.append(label)
return label_list
def sample_labels(self):
"""
Returns:
numpy.ndarray: n_way labels sampled at random from all available labels
"""
labels = np.random.choice(self.label_list, self.n_way, replace=False)
return labels
def sample_images_from_labels(self, labels):
"""
For each label in labels, samples n_support+n_query images containing at least one box associated with label
The first n_way elements of the returned tensor will be used to determine the sampled labels
Args:
labels (numpy.ndarray): labels from which images will be sampled
Returns:
            torch.Tensor: tensor of length n_way*(1+n_support+n_query) containing the sampled labels
            (each label l is encoded as -l-1 in the first n_way entries) followed by the indices of the
            images constituting the episode
"""
#TODO: images can appear twice
images_indices = list(-labels-1)
for label in labels:
images_from_label = np.random.choice(
self.images_per_label[label],
self.n_support+self.n_query,
replace=False
)
images_indices.extend(images_from_label)
return torch.tensor(images_indices, dtype=torch.int32)
def __len__(self):
return self.n_episodes
def __iter__(self):
for i in range(self.n_episodes):
labels = self.sample_labels()
yield self.sample_images_from_labels(labels)
| [
"torch.utils.data.DataLoader",
"torch.tensor"
] | 1.1.0 | artificially-ai/FewShotVision | 909bc414ea27ef0300091e1dd6baba4fb063324b |
1.7 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: romanshen
@file: distributions.py
@time: 2021/05/07
@contact: [email protected]
"""
import torch
# Priors
def gaussian_prior(name, log2pi, mu, sigma, device):
"""
Args:
*args: {"mu": , "sigma":, "log2pi"}
Returns: log_gaussian_pdf that takes a weight of arbitrary shape
"""
if mu == 0 and sigma == 1:
# We handle this case slightly differently as it is common and can be made more efficient
def log_gaussian_pdf(x):
x = x.view(x.shape[0], -1)
return -log2pi * x.shape[1] / 2 - torch.sum(x ** 2) / 2.0
return log_gaussian_pdf
else:
mu_tensor = torch.tensor(
mu, requires_grad=False, dtype=torch.float32, device=device
)
sigma_tensor = torch.tensor(
sigma, requires_grad=False, dtype=torch.float32, device=device
)
two_sigma_squared = 2 * (sigma_tensor ** 2)
log_sigma = torch.log(sigma_tensor)
def log_gaussian_pdf(x):
x = x.view(x.shape[0], -1)
log_pd = -log2pi * x.shape[1] / 2
log_pd = log_pd - torch.sum((x - mu_tensor) ** 2, dim=1) / two_sigma_squared
log_pd = log_pd - log_sigma * x.shape[1] / 2
return log_pd
return log_gaussian_pdf
# Sampling noise distributions
def radial(size):
"""
    Creates a distribution that is unit Gaussian along r and uniform over the angle theta.
:param size: The size of the weight distribution to be generated.
Zeroth dimension is variational samples.
1+ dimensions are the weight for each sample from the variational distribution.
The same weight is applied to each example in a batch.
:return: noise distribution
"""
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# First we find a random direction (\epsilon_{\text{MFVI}} in equation (3) on page 4)
epsilon_mfvi = torch.randn(size, device=device)
# Then we pick a distance (r in equation (3) on page 4)
distance = torch.randn((size[0]), device=device)
# Then we normalize each variational sample independently
if len(size) == 2:
normalizing_factor = torch.norm(
epsilon_mfvi.view(size[0], -1), p=2, dim=1
).unsqueeze(1)
distance = distance.unsqueeze(1)
elif len(size) == 3:
normalizing_factor = (
torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)
.unsqueeze(1)
.unsqueeze(1)
)
distance = distance.unsqueeze(1).unsqueeze(1)
elif len(size) == 5:
# Here we have a CNN with dimensions (var samples, out_channels, in_channels, kernel, kernel)
normalizing_factor = (
torch.norm(epsilon_mfvi.view(size[0], -1), p=2, dim=1)
.unsqueeze(1)
.unsqueeze(1)
.unsqueeze(1)
.unsqueeze(1)
)
distance = distance.unsqueeze(1).unsqueeze(1).unsqueeze(1).unsqueeze(1)
else:
raise ValueError(
"Number of dimensions for epsilon not expected. Are you sure you wanted size {}".format(
size
)
)
direction = epsilon_mfvi / normalizing_factor
epsilon_radial = direction * distance
return epsilon_radial
def gaussian(size):
"""
    Returns a tensor of random epsilon drawn from a unit Gaussian distribution
    :param size: shape of tensor to return (tuple)
    :return: FloatTensor of the given size
"""
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
epsilon_mfvi = torch.randn(size, device=device)
return epsilon_mfvi
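if __name__ == "__main__":
    # Minimal sketch (not part of the original module): evaluate the Gaussian prior on a batch
    # of variational weight samples and draw matching radial noise. The shapes and
    # hyper-parameters below are illustrative assumptions only.
    import math

    log_prior = gaussian_prior("weight_prior", math.log(2 * math.pi), mu=0.0, sigma=0.5, device="cpu")
    w = torch.randn(8, 16, 32)  # (variational samples, out_features, in_features)
    print("per-sample log prior shape:", log_prior(w).shape)  # torch.Size([8])
    print("radial noise shape:", radial((8, 16, 32)).shape)  # same shape as the weights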
| [
"torch.cuda.is_available",
"torch.tensor",
"torch.log",
"torch.randn",
"torch.sum"
] | 1.7.1 | RomanShen/radial-bnn | 7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c |
1.1 | from __future__ import division
import torch
import torch.nn as nn
from .base import BaseDetector
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,
merge_aug_masks)
@DETECTORS.register_module
class CascadeRCNN(BaseDetector, RPNTestMixin):
def __init__(self,
num_stages,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
super(CascadeRCNN, self).__init__()
self.num_stages = num_stages
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if bbox_head is not None:
self.bbox_roi_extractor = nn.ModuleList()
self.bbox_head = nn.ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
self.bbox_head.append(builder.build_head(head))
if mask_head is not None:
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(builder.build_head(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = nn.ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(
builder.build_roi_extractor(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(CascadeRCNN, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
for i in range(self.num_stages):
if self.with_bbox:
self.bbox_roi_extractor[i].init_weights()
self.bbox_head[i].init_weights()
if self.with_mask:
if not self.share_roi_extractor:
self.mask_roi_extractor[i].init_weights()
self.mask_head[i].init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(rcnn_train_cfg.assigner)
bbox_sampler = build_sampler(
rcnn_train_cfg.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
for name, value in loss_bbox.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
mask_roi_extractor = self.mask_roi_extractor[i]
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(
x[:mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
# reuse positive bbox feats
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_head = self.mask_head[i]
mask_pred = mask_head(mask_feats)
mask_targets = mask_head.get_target(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
for name, value in loss_mask.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# refine bboxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
roi_labels = bbox_targets[0] # bbox_targets is a tuple
with torch.no_grad():
proposal_list = bbox_head.refine_bboxes(
rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
return losses
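    # Note on the cascade loop in forward_train: each stage i assigns and samples proposals
    # with its own rcnn_train_cfg, computes bbox (and optionally mask) losses scaled by
    # stage_loss_weights[i], and, for all but the last stage, refines the proposals with
    # refine_bboxes() so the next stage trains on progressively better boxes.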
def simple_test(self, img, img_meta, proposals=None, rescale=False):
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
img_shape = img_meta[0]['img_shape']
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = bbox_head(bbox_feats)
ms_scores.append(cls_score)
if self.test_cfg.keep_all_stages:
det_bboxes, det_labels = bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
bbox_head.num_classes)
ms_bbox_result['stage{}'.format(i)] = bbox_result
if self.with_mask:
mask_roi_extractor = self.mask_roi_extractor[i]
mask_head = self.mask_head[i]
if det_bboxes.shape[0] == 0:
mask_classes = mask_head.num_classes - 1
segm_result = [[] for _ in range(mask_classes)]
else:
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats, i)
mask_pred = mask_head(mask_feats)
segm_result = mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['stage{}'.format(i)] = segm_result
if i < self.num_stages - 1:
bbox_label = cls_score.argmax(dim=1)
rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,
img_meta[0])
cls_score = sum(ms_scores) / self.num_stages
det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if det_bboxes.shape[0] == 0:
mask_classes = self.mask_head[-1].num_classes - 1
segm_result = [[] for _ in range(mask_classes)]
else:
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
aug_masks = []
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head[i](mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks,
[img_meta] * self.num_stages,
self.test_cfg.rcnn)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['ensemble'] = segm_result
if not self.test_cfg.keep_all_stages:
if self.with_mask:
results = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
results = ms_bbox_result['ensemble']
else:
if self.with_mask:
results = {
stage: (ms_bbox_result[stage], ms_segm_result[stage])
for stage in ms_bbox_result
}
else:
results = ms_bbox_result
return results
def aug_test(self, img, img_meta, proposals=None, rescale=False):
raise NotImplementedError
def show_result(self, data, result, img_norm_cfg, **kwargs):
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
super(CascadeRCNN, self).show_result(data, result, img_norm_cfg,
**kwargs)
| [
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.no_grad",
"torch.ones"
] | 1.1 | WangY0906/mmdetection-for-study | c89703006a2a5250f4d1c71e0aad958d72526885 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
import hdvw.models.layers as layers
import hdvw.models.gates as gates
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels,
stride=1, groups=1, width_per_group=64, rate=0.3, sd=0.0,
reduction=16, **block_kwargs):
super(BasicBlock, self).__init__()
if groups != 1 or width_per_group != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
width = int(channels * (width_per_group / 64.)) * groups
self.rate = rate
self.shortcut = []
if stride != 1 or in_channels != channels * self.expansion:
self.shortcut.append(layers.conv1x1(in_channels, channels * self.expansion, stride=stride))
self.shortcut.append(layers.bn(channels * self.expansion))
self.shortcut = nn.Sequential(*self.shortcut)
self.conv1 = nn.Sequential(
layers.conv3x3(in_channels, width, stride=stride),
layers.bn(width),
layers.relu(),
)
self.conv2 = nn.Sequential(
layers.conv3x3(width, channels * self.expansion),
layers.bn(channels * self.expansion),
)
self.relu = layers.relu()
self.sd = layers.DropPath(sd) if sd > 0.0 else nn.Identity()
self.gate = gates.ChannelGate(channels * self.expansion, reduction, max_pool=False)
def forward(self, x):
skip = self.shortcut(x)
x = self.conv1(x)
x = F.dropout(x, p=self.rate)
x = self.conv2(x)
x = self.gate(x)
x = self.sd(x) + skip
x = self.relu(x)
return x
def extra_repr(self):
return "rate=%.3e" % self.rate
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channels, channels,
stride=1, groups=1, width_per_group=64, rate=0.3, sd=0.0,
reduction=16, **block_kwargs):
super(Bottleneck, self).__init__()
width = int(channels * (width_per_group / 64.)) * groups
self.rate = rate
self.shortcut = []
if stride != 1 or in_channels != channels * self.expansion:
self.shortcut.append(layers.conv1x1(
in_channels, channels * self.expansion, stride=stride))
self.shortcut.append(layers.bn(channels * self.expansion))
self.shortcut = nn.Sequential(*self.shortcut)
self.conv1 = nn.Sequential(
layers.conv1x1(in_channels, width),
layers.bn(width),
layers.relu(),
)
self.conv2 = nn.Sequential(
layers.conv3x3(width, width, stride=stride, groups=groups),
layers.bn(width),
layers.relu(),
)
self.conv3 = nn.Sequential(
layers.conv1x1(width, channels * self.expansion),
layers.bn(channels * self.expansion),
)
self.relu = layers.relu()
self.sd = layers.DropPath(sd) if sd > 0.0 else nn.Identity()
self.gate = gates.ChannelGate(channels * self.expansion, reduction, max_pool=False)
def forward(self, x):
skip = self.shortcut(x)
x = self.conv1(x)
x = self.conv2(x)
x = F.dropout(x, p=self.rate)
x = self.conv3(x)
x = self.gate(x)
x = self.sd(x) + skip
x = self.relu(x)
return x
def extra_repr(self):
return "rate=%.3e" % self.rate
| [
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.Identity"
] | 1.4 | shaoshitong/hdvw | fbb39da9ad8a765f74225eec7e9614978c740dde |
1.6 | import torch
import numpy as np
import math
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T # 4xn
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
# return torch.nan_to_num(iou - (rho2 / c2 + v * alpha), nan=1.0) # CIoU
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
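# Summary of the penalty terms implemented above (matching the code, not an external reference):
#   GIoU = IoU - (C - U) / C                   C: area of the smallest enclosing box, U: union area
#   DIoU = IoU - rho^2 / c^2                   rho: distance between box centers, c: enclosing-box diagonal
#   CIoU = IoU - (rho^2 / c^2 + alpha * v)     v = (4 / pi^2) * (atan(w2 / h2) - atan(w1 / h1))^2
#                                              alpha = v / (1 - IoU + v), computed with gradients disabled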
if __name__ == '__main__':
# ground truth
    box1 = torch.tensor([150, 120, 50, 30]) # xmin, ymin, width, height
# detections
box2 = torch.tensor([
[150, 120, 50, 30], # perfect match
[150, 120, 30, 50],
[140, 130, 50, 30],
[10, 20, 50, 30], # non overlapping
[0, 0, 0, 0], # invalid
])
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False)
print('IoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=True, DIoU=False, CIoU=False)
print('GIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=True, CIoU=False)
print('DIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=True)
print('CIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
# special case checking
    box1 = torch.tensor([0, 0, 0, 0]) # xmin, ymin, width, height
    box2 = torch.tensor([[0, 0, 0, 0]]) # xmin, ymin, width, height
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False)
print('IoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=True, DIoU=False, CIoU=False)
print('GIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=True, CIoU=False)
print('DIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=True)
print('CIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())
| [
"torch.min",
"torch.max",
"torch.no_grad",
"torch.atan",
"torch.tensor"
] | 1.6.0 | cheind/pytorch-blender-dr | fd2e449dd81723bb1978f005736104f27cc1770b |
1.3 | import os
import pytest
import torch
import torch.distributed as dist
import ignite.distributed as idist
from ignite.distributed.utils import has_native_dist_support
from tests.ignite.distributed.utils import (
_test_distrib_all_gather,
_test_distrib_all_reduce,
_test_distrib_barrier,
_test_distrib_broadcast,
_test_distrib_config,
_test_distrib_one_rank_only,
_test_distrib_one_rank_only_with_engine,
_test_sync,
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_native_distrib_single_node_launch_tool_gloo(local_rank, world_size):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize("gloo", timeout=timeout)
_test_distrib_config(local_rank, "gloo", world_size, "cpu", rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_native_distrib_single_node_launch_tool_nccl(local_rank, world_size):
import os
rank = local_rank
os.environ["RANK"] = f"{rank}"
idist.initialize("nccl")
_test_distrib_config(local_rank, "nccl", world_size, "cuda", rank)
idist.finalize()
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_native_distrib_single_node_spawn_gloo():
from datetime import timedelta
timeout = timedelta(seconds=20)
world_size = 4
idist.spawn(
"gloo", _test_distrib_config, args=("gloo", world_size, "cpu"), nproc_per_node=world_size, timeout=timeout
)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_native_distrib_single_node_spawn_nccl():
world_size = torch.cuda.device_count()
idist.spawn("nccl", _test_distrib_config, args=("nccl", world_size, "cuda"), nproc_per_node=world_size)
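# Editorial note (not part of the test suite): the *_launch_tool_* tests above assume the
# process-group environment is already prepared by a launcher / the test fixtures (local_rank,
# world_size) and only set RANK themselves, whereas the *_spawn_* tests create their own worker
# processes through idist.spawn().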
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_sync_as_native_gloo(distributed_context_single_node_gloo):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_sync_as_native_nccl(distributed_context_single_node_nccl):
from ignite.distributed.comp_models.native import _NativeDistModel
_test_sync(_NativeDistModel)
def _test_idist_methods_in_native_context(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _set_model, _SerialModel
_set_model(_SerialModel())
ws = dist.get_world_size()
rank = dist.get_rank()
_test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_idist_methods_in_native_context("gloo", "cpu", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
_test_idist_methods_in_native_context("nccl", "cuda", local_rank)
def _test_idist_methods_in_native_context_set_local_rank(backend, device, local_rank):
# We explicitly set _model as _SerialModel
# then call idist.* methods and check that they give correct values
from ignite.distributed.utils import _set_model, _SerialModel
_set_model(_SerialModel())
lrank = int(os.environ["LOCAL_RANK"])
del os.environ["LOCAL_RANK"]
ws = dist.get_world_size()
rank = dist.get_rank()
idist.set_local_rank(local_rank)
_test_distrib_config(local_rank=local_rank, backend=backend, ws=ws, true_device=device, rank=rank)
os.environ["LOCAL_RANK"] = str(lrank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_methods_in_native_gloo_context_set_local_rank(distributed_context_single_node_gloo):
local_rank = distributed_context_single_node_gloo["local_rank"]
_test_idist_methods_in_native_context_set_local_rank("gloo", "cpu", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_in_native_nccl_context_set_local_rank(distributed_context_single_node_nccl):
local_rank = distributed_context_single_node_nccl["local_rank"]
_test_idist_methods_in_native_context_set_local_rank("nccl", "cuda", local_rank)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_reduce_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_reduce_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_all_gather_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_all_gather(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_all_gather_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_all_gather(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_broadcast_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_broadcast_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_broadcast(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_barrier_nccl(distributed_context_single_node_nccl):
device = f"cuda:{distributed_context_single_node_nccl['local_rank']}"
_test_distrib_barrier(device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_barrier_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_barrier(device)
def _test_idist_methods_overhead(ok_factor):
import time
n = 100000
m = 5
t2 = 0.0
t1 = 0.0
for j in range(m):
start = time.time()
for _ in range(n):
_ = dist.get_world_size()
_ = dist.get_rank()
elapsed = time.time() - start
t2 += elapsed / n / m
start = time.time()
for _ in range(n):
_ = idist.get_world_size()
_ = idist.get_rank()
elapsed = time.time() - start
t1 += elapsed / n / m
overhead_factor = t1 / t2
assert overhead_factor < ok_factor, f"{overhead_factor} vs {ok_factor} | {t2} vs {t1}"
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="Do not want to run this test on Github or Travis, but CircleCI"
)
def test_idist_methods_overhead_gloo(distributed_context_single_node_gloo):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.utils import _model
from ignite.distributed.comp_models.native import _NativeDistModel
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_methods_overhead_nccl(distributed_context_single_node_nccl):
_test_idist_methods_overhead(2.5)
idist.sync()
from ignite.distributed.utils import _model
from ignite.distributed.comp_models.native import _NativeDistModel
assert isinstance(_model, _NativeDistModel)
_test_idist_methods_overhead(1.7)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test_idist_one_rank_only_gloo(distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_idist_one_rank_only_nccl(local_rank, distributed_context_single_node_nccl):
device = f"cuda:{local_rank}"
_test_distrib_one_rank_only(device=device)
_test_distrib_one_rank_only_with_engine(device=device)
| [
"torch.distributed.get_world_size",
"torch.distributed.get_rank",
"torch.cuda.is_available",
"torch.cuda.device_count"
] | 1.3 | jkhenning/ignite | 2485fd42c6ef4d3e97fd606a52f8c6e5d940357e |
1.3 | import numbers
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.metrics_lambda import MetricsLambda
__all__ = ["ConfusionMatrix", "mIoU", "IoU", "DiceCoefficient", "cmAccuracy", "cmPrecision", "cmRecall"]
class ConfusionMatrix(Metric):
"""Calculates confusion matrix for multi-class data.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y_pred` must contain logits and has the following shape (batch_size, num_categories, ...)
- `y` should have the following shape (batch_size, ...) and contains ground-truth class indices
with or without the background class. During the computation, argmax of `y_pred` is taken to determine
predicted classes.
Args:
num_classes (int): number of classes. See notes for more details.
average (str, optional): confusion matrix values averaging schema: None, "samples", "recall", "precision".
Default is None. If `average="samples"` then confusion matrix values are normalized by the number of seen
samples. If `average="recall"` then confusion matrix values are normalized such that diagonal values
represent class recalls. If `average="precision"` then confusion matrix values are normalized such that
diagonal values represent class precisions.
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Note:
In case of the targets `y` in `(batch_size, ...)` format, target indices between 0 and `num_classes` only
contribute to the confusion matrix and others are neglected. For example, if `num_classes=20` and target index
equal 255 is encountered, then it is filtered out.
"""
def __init__(
self,
num_classes: int,
average: Optional[str] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if average is not None and average not in ("samples", "recall", "precision"):
raise ValueError("Argument average can None or one of 'samples', 'recall', 'precision'")
self.num_classes = num_classes
self._num_examples = 0
self.average = average
super(ConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.confusion_matrix = torch.zeros(self.num_classes, self.num_classes, dtype=torch.int64, device=self._device)
self._num_examples = 0
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() < 2:
raise ValueError(f"y_pred must have shape (batch_size, num_categories, ...), but given {y_pred.shape}")
if y_pred.shape[1] != self.num_classes:
raise ValueError(
f"y_pred does not have correct number of categories: {y_pred.shape[1]} vs {self.num_classes}"
)
if not (y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
"y_pred must have shape (batch_size, num_categories, ...) and y must have "
"shape of (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape = y_pred.shape # type: Tuple[int, ...]
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if y_shape != y_pred_shape:
raise ValueError("y and y_pred must have compatible shapes.")
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
y_pred, y = output[0].detach(), output[1].detach()
self._num_examples += y_pred.shape[0]
# target is (batch_size, ...)
y_pred = torch.argmax(y_pred, dim=1).flatten()
y = y.flatten()
target_mask = (y >= 0) & (y < self.num_classes)
y = y[target_mask]
y_pred = y_pred[target_mask]
indices = self.num_classes * y + y_pred
m = torch.bincount(indices, minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
self.confusion_matrix += m.to(self.confusion_matrix)
@sync_all_reduce("confusion_matrix", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("Confusion matrix must have at least one example before it can be computed.")
if self.average:
self.confusion_matrix = self.confusion_matrix.float()
if self.average == "samples":
return self.confusion_matrix / self._num_examples
else:
return self.normalize(self.confusion_matrix, self.average)
return self.confusion_matrix
@staticmethod
def normalize(matrix: torch.Tensor, average: str) -> torch.Tensor:
if average == "recall":
return matrix / (matrix.sum(dim=1).unsqueeze(1) + 1e-15)
elif average == "precision":
return matrix / (matrix.sum(dim=0) + 1e-15)
else:
raise ValueError("Argument average should be one of 'samples', 'recall', 'precision'")
def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates Intersection over Union using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
.. code-block:: python
train_evaluator = ...
cm = ConfusionMatrix(num_classes=num_classes)
IoU(cm, ignore_index=0).attach(train_evaluator, 'IoU')
state = train_evaluator.run(train_dataset)
# state.metrics['IoU'] -> tensor of shape (num_classes - 1, )
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if not (cm.average in (None, "samples")):
raise ValueError("ConfusionMatrix should have average attribute either None or 'samples'")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(f"ignore_index should be non-negative integer, but given {ignore_index}")
# Increase floating point precision and pass to CPU
cm = cm.type(torch.DoubleTensor)
iou = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15) # type: MetricsLambda
if ignore_index is not None:
        ignore_idx = ignore_index # type: int # used due to typing issues with mypy
def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(iou_vector):
raise ValueError(f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}")
indices = list(range(len(iou_vector)))
indices.remove(ignore_idx)
return iou_vector[indices]
return MetricsLambda(ignore_index_fn, iou)
else:
return iou
def mIoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates mean Intersection over Union using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
.. code-block:: python
train_evaluator = ...
cm = ConfusionMatrix(num_classes=num_classes)
mIoU(cm, ignore_index=0).attach(train_evaluator, 'mean IoU')
state = train_evaluator.run(train_dataset)
# state.metrics['mean IoU'] -> scalar
"""
iou = IoU(cm=cm, ignore_index=ignore_index).mean() # type: MetricsLambda
return iou
def cmAccuracy(cm: ConfusionMatrix) -> MetricsLambda:
"""Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.type(torch.DoubleTensor)
accuracy = cm.diag().sum() / (cm.sum() + 1e-15) # type: MetricsLambda
return accuracy
def cmPrecision(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""Calculates precision using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.type(torch.DoubleTensor)
precision = cm.diag() / (cm.sum(dim=0) + 1e-15) # type: MetricsLambda
if average:
mean = precision.mean() # type: MetricsLambda
return mean
return precision
def cmRecall(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""
Calculates recall using :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
average (bool, optional): if True metric value is averaged over all classes
Returns:
MetricsLambda
"""
# Increase floating point precision and pass to CPU
cm = cm.type(torch.DoubleTensor)
recall = cm.diag() / (cm.sum(dim=1) + 1e-15) # type: MetricsLambda
if average:
mean = recall.mean() # type: MetricsLambda
return mean
return recall
def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates Dice Coefficient for a given :class:`~ignite.metrics.ConfusionMatrix` metric.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(f"ignore_index should be non-negative integer, but given {ignore_index}")
# Increase floating point precision and pass to CPU
cm = cm.type(torch.DoubleTensor)
dice = 2.0 * cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) + 1e-15) # type: MetricsLambda
if ignore_index is not None:
        ignore_idx = ignore_index # type: int # used due to typing issues with mypy
def ignore_index_fn(dice_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(dice_vector):
raise ValueError(
f"ignore_index {ignore_idx} is larger than the length of Dice vector {len(dice_vector)}"
)
indices = list(range(len(dice_vector)))
indices.remove(ignore_idx)
return dice_vector[indices]
return MetricsLambda(ignore_index_fn, dice)
else:
return dice
| [
"torch.zeros",
"torch.device",
"torch.bincount",
"torch.argmax"
] | 1.3 | jkhenning/ignite | 2485fd42c6ef4d3e97fd606a52f8c6e5d940357e |
1.5 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. Team and deepset Team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Acknowledgements: Many of the modeling parts here come from the great transformers repository: https://github.com/huggingface/transformers.
Thanks for the great work! """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import os
import io
from pathlib import Path
from collections import OrderedDict
from dotmap import DotMap
from tqdm import tqdm
import copy
import numpy as np
import torch
from torch import nn
logger = logging.getLogger(__name__)
from transformers.modeling_bert import BertModel, BertConfig
from transformers.modeling_roberta import RobertaModel, RobertaConfig
from transformers.modeling_xlnet import XLNetModel, XLNetConfig
from transformers.modeling_albert import AlbertModel, AlbertConfig
from transformers.modeling_xlm_roberta import XLMRobertaModel, XLMRobertaConfig
from transformers.modeling_distilbert import DistilBertModel, DistilBertConfig
from transformers.modeling_electra import ElectraModel, ElectraConfig
from transformers.modeling_camembert import CamembertModel, CamembertConfig
from transformers.modeling_utils import SequenceSummary
from transformers.tokenization_bert import load_vocab
from farm.modeling import wordembedding_utils
from farm.modeling.wordembedding_utils import s3e_pooling
# These are the names of the attributes in various model configs which refer to the number of dimensions
# in the output vectors
OUTPUT_DIM_NAMES = ["dim", "hidden_size", "d_model"]
class LanguageModel(nn.Module):
"""
The parent class for any kind of model that can embed language into a semantic vector space. Practically
speaking, these models read in tokenized sentences and return vectors that capture the meaning of sentences
or of tokens.
"""
subclasses = {}
def __init_subclass__(cls, **kwargs):
""" This automatically keeps track of all available subclasses.
        Enables generic load() for all specific LanguageModel implementations.
"""
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls
def forward(self, input_ids, padding_mask, **kwargs):
raise NotImplementedError
@classmethod
def from_scratch(cls, model_type, vocab_size):
if model_type.lower() == "bert":
model = Bert
return model.from_scratch(vocab_size)
@classmethod
def load(cls, pretrained_model_name_or_path, n_added_tokens=0, language_model_class=None, **kwargs):
"""
Load a pretrained language model either by
1. specifying its name and downloading it
2. or pointing to the directory it is saved in.
Available remote models:
* bert-base-uncased
* bert-large-uncased
* bert-base-cased
* bert-large-cased
* bert-base-multilingual-uncased
* bert-base-multilingual-cased
* bert-base-chinese
* bert-base-german-cased
* roberta-base
* roberta-large
* xlnet-base-cased
* xlnet-large-cased
* xlm-roberta-base
* xlm-roberta-large
* albert-base-v2
* albert-large-v2
* distilbert-base-german-cased
* distilbert-base-multilingual-cased
* google/electra-small-discriminator
* google/electra-base-discriminator
* google/electra-large-discriminator
See all supported model variations here: https://huggingface.co/models
The appropriate language model class is inferred automatically from `pretrained_model_name_or_path`
or can be manually supplied via `language_model_class`.
:param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
:type pretrained_model_name_or_path: str
:param language_model_class: (Optional) Name of the language model class to load (e.g. `Bert`)
:type language_model_class: str
"""
config_file = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(config_file):
# it's a local directory in FARM format
config = json.load(open(config_file))
language_model = cls.subclasses[config["name"]].load(pretrained_model_name_or_path)
else:
if language_model_class is None:
# it's transformers format (either from model hub or local)
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if "xlm" in pretrained_model_name_or_path and "roberta" in pretrained_model_name_or_path:
language_model_class = 'XLMRoberta'
elif 'roberta' in pretrained_model_name_or_path:
language_model_class = 'Roberta'
elif 'camembert' in pretrained_model_name_or_path or 'umberto' in pretrained_model_name_or_path:
language_model_class = "Camembert"
elif 'albert' in pretrained_model_name_or_path:
language_model_class = 'Albert'
elif 'distilbert' in pretrained_model_name_or_path:
language_model_class = 'DistilBert'
elif 'bert' in pretrained_model_name_or_path:
language_model_class = 'Bert'
elif 'xlnet' in pretrained_model_name_or_path:
language_model_class = 'XLNet'
elif 'electra' in pretrained_model_name_or_path:
language_model_class = 'Electra'
elif "word2vec" in pretrained_model_name_or_path.lower() or "glove" in pretrained_model_name_or_path.lower():
language_model_class = 'WordEmbedding_LM'
if language_model_class:
language_model = cls.subclasses[language_model_class].load(pretrained_model_name_or_path, **kwargs)
else:
language_model = None
if not language_model:
raise Exception(
f"Model not found for {pretrained_model_name_or_path}. Either supply the local path for a saved "
f"model or one of bert/roberta/xlnet/albert/distilbert models that can be downloaded from remote. "
f"Ensure that the model class name can be inferred from the directory name when loading a "
f"Transformers' model. Here's a list of available models: "
f"https://farm.deepset.ai/api/modeling.html#farm.modeling.language_model.LanguageModel.load"
)
# resize embeddings in case of custom vocab
if n_added_tokens != 0:
# TODO verify for other models than BERT
model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings
vocab_size = model_emb_size + n_added_tokens
logger.info(
f"Resizing embedding layer of LM from {model_emb_size} to {vocab_size} to cope with custom vocab.")
language_model.model.resize_token_embeddings(vocab_size)
# verify
model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings
assert vocab_size == model_emb_size
return language_model
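    # A minimal usage sketch (illustrative assumption, not executed here): remote model names
    # and local FARM / transformers checkpoints both go through this single entry point.
    #
    #   lm = LanguageModel.load("bert-base-cased")        # downloads from the model hub
    #   lm = LanguageModel.load("some_dir/farm_model")    # local directory in FARM format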
def get_output_dims(self):
config = self.model.config
for odn in OUTPUT_DIM_NAMES:
if odn in dir(config):
return getattr(config, odn)
else:
raise Exception("Could not infer the output dimensions of the language model")
def freeze(self, layers):
""" To be implemented"""
raise NotImplementedError()
def unfreeze(self):
""" To be implemented"""
raise NotImplementedError()
def save_config(self, save_dir):
save_filename = Path(save_dir) / "language_model_config.json"
with open(save_filename, "w") as file:
setattr(self.model.config, "name", self.__class__.__name__)
setattr(self.model.config, "language", self.language)
string = self.model.config.to_json_string()
file.write(string)
def save(self, save_dir):
"""
Save the model state_dict and its config file so that it can be loaded again.
:param save_dir: The directory in which the model should be saved.
:type save_dir: str
"""
# Save Weights
save_name = Path(save_dir) / "language_model.bin"
model_to_save = (
self.model.module if hasattr(self.model, "module") else self.model
        ) # Only save the model itself
torch.save(model_to_save.state_dict(), save_name)
self.save_config(save_dir)
@classmethod
def _get_or_infer_language_from_name(cls, language, name):
if language is not None:
return language
else:
return cls._infer_language_from_name(name)
@classmethod
def _infer_language_from_name(cls, name):
known_languages = (
"german",
"english",
"chinese",
"indian",
"french",
"polish",
"spanish",
"multilingual",
)
matches = [lang for lang in known_languages if lang in name]
if "camembert" in name:
language = "french"
logger.info(
f"Automatically detected language from language model name: {language}"
)
elif "umberto" in name:
language = "italian"
logger.info(
f"Automatically detected language from language model name: {language}"
)
elif len(matches) == 0:
language = "english"
logger.warning(
"Could not automatically detect from language model name what language it is. \n"
"\t We guess it's an *ENGLISH* model ... \n"
"\t If not: Init the language model by supplying the 'language' param."
)
elif len(matches) > 1:
logger.warning(
"Could not automatically detect from language model name what language it is.\n"
f"\t Found multiple matches: {matches}\n"
"\t Please init the language model by manually supplying the 'language' as a parameter.\n"
f"\t Using {matches[0]} as language parameter for now.\n"
)
language = matches[0]
else:
language = matches[0]
logger.info(
f"Automatically detected language from language model name: {language}"
)
return language
def formatted_preds(self, logits, samples, ignore_first_token=True,
padding_mask=None, input_ids=None, **kwargs):
"""
Extracting vectors from language model (e.g. for extracting sentence embeddings).
Different pooling strategies and layers are available and will be determined from the object attributes
`extraction_layer` and `extraction_strategy`. Both should be set via the Inferencer:
Example: Inferencer(extraction_strategy='cls_token', extraction_layer=-1)
:param logits: Tuple of (sequence_output, pooled_output) from the language model.
Sequence_output: one vector per token, pooled_output: one vector for whole sequence
:param samples: For each item in logits we need additional meta information to format the prediction (e.g. input text).
This is created by the Processor and passed in here from the Inferencer.
        :param ignore_first_token: Whether to exclude the first token from pooling operations (e.g. reduce_mean).
        Many models place a special token like [CLS] there that you usually don't want to include in your average of token embeddings.
:param padding_mask: Mask for the padding tokens. Those will also not be included in the pooling operations to prevent a bias by the number of padding tokens.
:param input_ids: ids of the tokens in the vocab
:param kwargs: kwargs
:return: list of dicts containing preds, e.g. [{"context": "some text", "vec": [-0.01, 0.5 ...]}]
"""
if not hasattr(self, "extraction_layer") or not hasattr(self, "extraction_strategy"):
raise ValueError("`extraction_layer` or `extraction_strategy` not specified for LM. "
"Make sure to set both, e.g. via Inferencer(extraction_strategy='cls_token', extraction_layer=-1)`")
# unpack the tuple from LM forward pass
sequence_output = logits[0][0]
pooled_output = logits[0][1]
# aggregate vectors
if self.extraction_strategy == "pooled":
if self.extraction_layer != -1:
raise ValueError(f"Pooled output only works for the last layer, but got extraction_layer = {self.extraction_layer}. Please set `extraction_layer=-1`.)")
vecs = pooled_output.cpu().numpy()
elif self.extraction_strategy == "per_token":
vecs = sequence_output.cpu().numpy()
elif self.extraction_strategy == "reduce_mean":
vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy, ignore_first_token=ignore_first_token)
elif self.extraction_strategy == "reduce_max":
vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy, ignore_first_token=ignore_first_token)
elif self.extraction_strategy == "cls_token":
vecs = sequence_output[:, 0, :].cpu().numpy()
elif self.extraction_strategy == "s3e":
vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy,
ignore_first_token=ignore_first_token,
input_ids=input_ids, s3e_stats=self.s3e_stats)
else:
raise NotImplementedError
preds = []
for vec, sample in zip(vecs, samples):
pred = {}
pred["context"] = sample.tokenized["tokens"]
pred["vec"] = vec
preds.append(pred)
return preds
def _pool_tokens(self, sequence_output, padding_mask, strategy, ignore_first_token, input_ids=None, s3e_stats=None):
token_vecs = sequence_output.cpu().numpy()
# we only take the aggregated value of non-padding tokens
padding_mask = padding_mask.cpu().numpy()
ignore_mask_2d = padding_mask == 0
# sometimes we want to exclude the CLS token as well from our aggregation operation
if ignore_first_token:
ignore_mask_2d[:, 0] = True
ignore_mask_3d = np.zeros(token_vecs.shape, dtype=bool)
ignore_mask_3d[:, :, :] = ignore_mask_2d[:, :, np.newaxis]
if strategy == "reduce_max":
pooled_vecs = np.ma.array(data=token_vecs, mask=ignore_mask_3d).max(axis=1).data
if strategy == "reduce_mean":
pooled_vecs = np.ma.array(data=token_vecs, mask=ignore_mask_3d).mean(axis=1).data
if strategy == "s3e":
input_ids = input_ids.cpu().numpy()
pooled_vecs = s3e_pooling(token_embs=token_vecs,
token_ids=input_ids,
token_weights=s3e_stats["token_weights"],
centroids=s3e_stats["centroids"],
token_to_cluster=s3e_stats["token_to_cluster"],
svd_components=s3e_stats.get("svd_components", None),
mask=padding_mask == 0)
return pooled_vecs
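# A minimal, self-contained sketch of the masked pooling that `_pool_tokens` performs:
# a mean over only the non-padding (and optionally non-[CLS]) token vectors. The array
# names and shapes below are illustrative assumptions, not part of the FARM API.
def _example_masked_mean_pooling():
    token_vecs = np.random.rand(2, 3, 4)              # [batch, seq_len, hidden]
    padding_mask = np.array([[1, 1, 0], [1, 1, 1]])   # 1 = real token, 0 = padding
    ignore_mask_2d = padding_mask == 0
    ignore_mask_2d[:, 0] = True                       # also drop the first ([CLS]) token
    ignore_mask_3d = np.zeros(token_vecs.shape, dtype=bool)
    ignore_mask_3d[:, :, :] = ignore_mask_2d[:, :, np.newaxis]
    # masked entries are ignored by the mean -> one vector per sequence, shape [2, 4]
    return np.ma.array(data=token_vecs, mask=ignore_mask_3d).mean(axis=1).data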
class Bert(LanguageModel):
"""
A BERT model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
Paper: https://arxiv.org/abs/1810.04805
"""
def __init__(self):
super(Bert, self).__init__()
self.model = None
self.name = "bert"
@classmethod
def from_scratch(cls, vocab_size, name="bert", language="en"):
bert = cls()
bert.name = name
bert.language = language
config = BertConfig(vocab_size=vocab_size)
bert.model = BertModel(config)
return bert
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a pretrained model by supplying
* the name of a remote model on s3 ("bert-base-cased" ...)
* OR a local path of a model trained via transformers ("some_dir/huggingface_model")
* OR a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
:type pretrained_model_name_or_path: str
"""
bert = cls()
if "farm_lm_name" in kwargs:
bert.name = kwargs["farm_lm_name"]
else:
bert.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
bert_config = BertConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
bert.model = BertModel.from_pretrained(farm_lm_model, config=bert_config, **kwargs)
bert.language = bert.model.config.language
else:
# Pytorch-transformer Style
bert.model = BertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
bert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
return bert
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the BERT model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the
first sentence are marked with 0 and those in the second are marked with 1.
It is a tensor of shape [batch_size, max_seq_len]
:type segment_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
if self.model.encoder.output_hidden_states == True:
sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output, pooled_output = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.encoder.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.encoder.output_hidden_states = False
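# A brief usage sketch for the wrapper above (hedged: it assumes the "bert-base-cased"
# weights can be downloaded via transformers, and the tensor shapes are only
# illustrative). Kept as comments so the module itself stays side-effect free.
# bert = Bert.load("bert-base-cased")
# dummy_ids = torch.zeros((1, 8), dtype=torch.long)
# sequence_output, pooled_output = bert(
#     input_ids=dummy_ids,
#     segment_ids=torch.zeros_like(dummy_ids),
#     padding_mask=torch.ones_like(dummy_ids),
# )
# # sequence_output: [1, 8, hidden], pooled_output: [1, hidden]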
class Albert(LanguageModel):
"""
An ALBERT model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
"""
def __init__(self):
super(Albert, self).__init__()
self.model = None
self.name = "albert"
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* the name of a remote model on s3 ("albert-base" ...)
* or a local path of a model trained via transformers ("some_dir/huggingface_model")
* or a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
albert = cls()
if "farm_lm_name" in kwargs:
albert.name = kwargs["farm_lm_name"]
else:
albert.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = AlbertConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
albert.model = AlbertModel.from_pretrained(farm_lm_model, config=config, **kwargs)
albert.language = albert.model.config.language
else:
# Huggingface transformer Style
albert.model = AlbertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
albert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
return albert
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the Albert model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the
first sentence are marked with 0 and those in the second are marked with 1.
It is a tensor of shape [batch_size, max_seq_len]
:type segment_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
if self.model.encoder.output_hidden_states == True:
sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output, pooled_output = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.encoder.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.encoder.output_hidden_states = False
class Roberta(LanguageModel):
"""
A RoBERTa model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
Paper: https://arxiv.org/abs/1907.11692
"""
def __init__(self):
super(Roberta, self).__init__()
self.model = None
self.name = "roberta"
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* the name of a remote model on s3 ("roberta-base" ...)
* or a local path of a model trained via transformers ("some_dir/huggingface_model")
* or a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
roberta = cls()
if "farm_lm_name" in kwargs:
roberta.name = kwargs["farm_lm_name"]
else:
roberta.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = RobertaConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
roberta.model = RobertaModel.from_pretrained(farm_lm_model, config=config, **kwargs)
roberta.language = roberta.model.config.language
else:
# Huggingface transformer Style
roberta.model = RobertaModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
roberta.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
return roberta
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the Roberta model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the
first sentence are marked with 0 and those in the second are marked with 1.
It is a tensor of shape [batch_size, max_seq_len]
:type segment_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
if self.model.encoder.output_hidden_states == True:
sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output, pooled_output = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.encoder.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.encoder.output_hidden_states = False
class XLMRoberta(LanguageModel):
"""
An XLM-RoBERTa model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
Paper: https://arxiv.org/abs/1911.02116
"""
def __init__(self):
super(XLMRoberta, self).__init__()
self.model = None
self.name = "xlm_roberta"
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* the name of a remote model on s3 ("xlm-roberta-base" ...)
* or a local path of a model trained via transformers ("some_dir/huggingface_model")
* or a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
xlm_roberta = cls()
if "farm_lm_name" in kwargs:
xlm_roberta.name = kwargs["farm_lm_name"]
else:
xlm_roberta.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = XLMRobertaConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
xlm_roberta.model = XLMRobertaModel.from_pretrained(farm_lm_model, config=config, **kwargs)
xlm_roberta.language = xlm_roberta.model.config.language
else:
# Huggingface transformer Style
xlm_roberta.model = XLMRobertaModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
xlm_roberta.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
return xlm_roberta
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the XLMRoberta model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the
first sentence are marked with 0 and those in the second are marked with 1.
It is a tensor of shape [batch_size, max_seq_len]
:type segment_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
if self.model.encoder.output_hidden_states == True:
sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output, pooled_output = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.encoder.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.encoder.output_hidden_states = False
class DistilBert(LanguageModel):
"""
A DistilBERT model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
NOTE:
- DistilBert doesn’t have token_type_ids, you don’t need to indicate which
token belongs to which segment. Just separate your segments with the separation
token tokenizer.sep_token (or [SEP])
- Unlike the other BERT variants, DistilBert does not output the
pooled_output. An additional pooler is initialized.
"""
def __init__(self):
super(DistilBert, self).__init__()
self.model = None
self.name = "distilbert"
self.pooler = None
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a pretrained model by supplying
* the name of a remote model on s3 ("distilbert-base-german-cased" ...)
* OR a local path of a model trained via transformers ("some_dir/huggingface_model")
* OR a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
:type pretrained_model_name_or_path: str
"""
distilbert = cls()
if "farm_lm_name" in kwargs:
distilbert.name = kwargs["farm_lm_name"]
else:
distilbert.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = DistilBertConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
distilbert.model = DistilBertModel.from_pretrained(farm_lm_model, config=config, **kwargs)
distilbert.language = distilbert.model.config.language
else:
# Pytorch-transformer Style
distilbert.model = DistilBertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
distilbert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
config = distilbert.model.config
# DistilBERT does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.
# The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).
# We don't want dropout at the end of the pooler, since we already apply it in the adaptive model
# before feeding everything to the prediction head
config.summary_last_dropout = 0
config.summary_type = 'first'
config.summary_activation = 'tanh'
distilbert.pooler = SequenceSummary(config)
distilbert.pooler.apply(distilbert.model._init_weights)
return distilbert
def forward(
self,
input_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the DistilBERT model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
attention_mask=padding_mask,
)
# We need to manually aggregate that to get a pooled output (one vec per seq)
pooled_output = self.pooler(output_tuple[0])
if self.model.config.output_hidden_states == True:
sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output = output_tuple[0]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.config.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.config.output_hidden_states = False
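# A rough, self-contained sketch of what the extra pooler configured in `load` boils
# down to: take the first token's hidden state and project it through a dense layer and
# a tanh. This is an illustration only, not the actual transformers SequenceSummary code.
class _ExampleFirstTokenPooler(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
    def forward(self, sequence_output):
        first_token = sequence_output[:, 0]             # [batch, hidden]
        return torch.tanh(self.dense(first_token))      # one pooled vector per sequence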
class XLNet(LanguageModel):
"""
An XLNet model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
Paper: https://arxiv.org/abs/1906.08237
"""
def __init__(self):
super(XLNet, self).__init__()
self.model = None
self.name = "xlnet"
self.pooler = None
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* the name of a remote model on s3 ("xlnet-base-cased" ...)
* or a local path of a model trained via transformers ("some_dir/huggingface_model")
* or a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
xlnet = cls()
if "farm_lm_name" in kwargs:
xlnet.name = kwargs["farm_lm_name"]
else:
xlnet.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = XLNetConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
xlnet.model = XLNetModel.from_pretrained(farm_lm_model, config=config, **kwargs)
xlnet.language = xlnet.model.config.language
else:
# Pytorch-transformer Style
xlnet.model = XLNetModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
xlnet.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
config = xlnet.model.config
# XLNet does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.
# The pooler takes the last hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).
# We don't want dropout at the end of the pooler, since we already apply it in the adaptive model
# before feeding everything to the prediction head
config.summary_last_dropout = 0
xlnet.pooler = SequenceSummary(config)
xlnet.pooler.apply(xlnet.model._init_weights)
return xlnet
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the XLNet model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the
first sentence are marked with 0 and those in the second are marked with 1.
It is a tensor of shape [batch_size, max_seq_len]
:type segment_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
# Note: XLNet has a couple of special input tensors for pretraining / text generation (perm_mask, target_mapping ...)
# We will need to implement them, if we wanna support LM adaptation
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
# XLNet also only returns the sequence_output (one vec per token)
# We need to manually aggregate that to get a pooled output (one vec per seq)
# TODO verify that this is really doing correct pooling
pooled_output = self.pooler(output_tuple[0])
if self.model.output_hidden_states == True:
sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output = output_tuple[0]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.output_hidden_states = False
class EmbeddingConfig():
"""
Config for Word Embeddings Models.
Necessary to work with Bert and other LM style functionality
"""
def __init__(self,
name=None,
embeddings_filename=None,
vocab_filename=None,
vocab_size=None,
hidden_size=None,
language=None,
**kwargs):
"""
:param name: Name of config
:param embeddings_filename:
:param vocab_filename:
:param vocab_size:
:param hidden_size:
:param language:
:param kwargs:
"""
self.name = name
self.embeddings_filename = embeddings_filename
self.vocab_filename = vocab_filename
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.language = language
if len(kwargs) > 0:
logger.info(f"Passed unused params {str(kwargs)} to the EmbeddingConfig. Might not be a problem.")
def to_dict(self):
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self):
"""
Serializes this instance to a JSON string.
Returns:
:obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
"""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class EmbeddingModel():
"""
Embedding Model that combines
- Embeddings
- Config Object
- Vocab
Necessary to work with Bert and other LM style functionality
"""
def __init__(self,
embedding_file,
config_dict,
vocab_file):
"""
:param embedding_file: filename of embeddings. Usually in txt format, with the word and associated vector on each line
:type embedding_file: str
:param config_dict: dictionary containing config elements
:type config_dict: dict
:param vocab_file: filename of vocab, each line contains a word
:type vocab_file: str
"""
self.config = EmbeddingConfig(**config_dict)
self.vocab = load_vocab(vocab_file)
temp = wordembedding_utils.load_embedding_vectors(embedding_file=embedding_file, vocab=self.vocab)
self.embeddings = torch.from_numpy(temp).float()
assert "[UNK]" in self.vocab, "No [UNK] symbol in Wordembeddingmodel! Aborting"
self.unk_idx = self.vocab["[UNK]"]
def save(self,save_dir):
# Save Weights
save_name = Path(save_dir) / self.config.embeddings_filename
embeddings = self.embeddings.cpu().numpy()
with open(save_name, "w") as f:
for w, vec in tqdm(zip(self.vocab, embeddings), desc="Saving embeddings", total=embeddings.shape[0]):
f.write(w + " " + " ".join(["%.6f" % v for v in vec]) + "\n")
f.close()
# Save vocab
save_name = Path(save_dir) / self.config.vocab_filename
with open(save_name, "w") as f:
for w in self.vocab:
f.write(w + "\n")
f.close()
def resize_token_embeddings(self, new_num_tokens=None):
# function is called as a vocab length validation inside FARM
# fast way of returning an object with num_embeddings attribute (needed for some checks)
# TODO add functionality to add words/tokens to a wordembeddingmodel after initialization
temp = {}
temp["num_embeddings"] = len(self.vocab)
temp = DotMap(temp)
return temp
class WordEmbedding_LM(LanguageModel):
"""
A Language Model based only on word embeddings
- Inside FARM, WordEmbedding Language Models must have a fixed vocabulary
- Each (known) word in some text input is projected to its vector representation
- Pooling operations can be applied for representing whole text sequences
"""
def __init__(self):
super(WordEmbedding_LM, self).__init__()
self.model = None
self.name = "WordEmbedding_LM"
self.pooler = None
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* a local path of a model trained via FARM ("some_dir/farm_model")
* the name of a remote model on s3
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
wordembedding_LM = cls()
if "farm_lm_name" in kwargs:
wordembedding_LM.name = kwargs["farm_lm_name"]
else:
wordembedding_LM.name = pretrained_model_name_or_path
# We need to differentiate between loading model from local or remote
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# local dir
config = json.load(open(farm_lm_config,"r"))
farm_lm_model = Path(pretrained_model_name_or_path) / config["embeddings_filename"]
vocab_filename = Path(pretrained_model_name_or_path) / config["vocab_filename"]
wordembedding_LM.model = EmbeddingModel(embedding_file=str(farm_lm_model), config_dict=config, vocab_file=str(vocab_filename))
wordembedding_LM.language = config.get("language", None)
else:
# from remote or cache
config_dict, resolved_vocab_file, resolved_model_file = wordembedding_utils.load_model(pretrained_model_name_or_path, **kwargs)
model = EmbeddingModel(embedding_file=resolved_model_file,
config_dict=config_dict,
vocab_file=resolved_vocab_file)
wordembedding_LM.model = model
wordembedding_LM.language = model.config.language
# taking the mean for getting the pooled representation
# TODO: extend this to other pooling operations or remove
wordembedding_LM.pooler = lambda x: torch.mean(x, dim=0)
return wordembedding_LM
def save(self, save_dir):
"""
Save the model embeddings and its config file so that it can be loaded again.
# TODO make embeddings trainable and save trained embeddings
# TODO save model weights as pytorch model bin for more efficient loading and saving
:param save_dir: The directory in which the model should be saved.
:type save_dir: str
"""
#save model
self.model.save(save_dir=save_dir)
#save config
self.save_config(save_dir=save_dir)
def forward(self, input_ids, **kwargs,):
"""
Perform the forward pass of the wordembedding model.
This is just the mapping of words to their corresponding embeddings
"""
sequence_output = []
pooled_output = []
# TODO do not use padding items in pooled output
for sample in input_ids:
sample_embeddings = []
for index in sample:
#if index != self.model.unk_idx:
sample_embeddings.append(self.model.embeddings[index])
sample_embeddings = torch.stack(sample_embeddings)
sequence_output.append(sample_embeddings)
pooled_output.append(self.pooler(sample_embeddings))
sequence_output = torch.stack(sequence_output)
pooled_output = torch.stack(pooled_output)
m = nn.BatchNorm1d(pooled_output.shape[1])
# use batchnorm for more stable learning
# but disable it, if we have batch size of one (cannot compute batchnorm stats with only one sample)
if pooled_output.shape[0] > 1:
pooled_output = m(pooled_output)
return sequence_output, pooled_output
def trim_vocab(self, token_counts, processor, min_threshold):
""" Remove embeddings for rare tokens in your corpus (< `min_threshold` occurrences) to reduce model size"""
logger.info(f"Removing tokens with less than {min_threshold} occurrences from model vocab")
new_vocab = OrderedDict()
valid_tok_indices = []
cnt = 0
old_num_emb = self.model.embeddings.shape[0]
for token, tok_idx in self.model.vocab.items():
if token_counts.get(token, 0) >= min_threshold or token in ("[CLS]","[SEP]","[UNK]","[PAD]","[MASK]"):
new_vocab[token] = cnt
valid_tok_indices.append(tok_idx)
cnt += 1
self.model.vocab = new_vocab
self.model.embeddings = self.model.embeddings[valid_tok_indices, :]
# update tokenizer vocab in place
processor.tokenizer.vocab = self.model.vocab
processor.tokenizer.ids_to_tokens = OrderedDict()
for k, v in processor.tokenizer.vocab.items():
processor.tokenizer.ids_to_tokens[v] = k
logger.info(f"Reduced vocab from {old_num_emb} to {self.model.embeddings.shape[0]}")
def normalize_embeddings(self, zero_mean=True, pca_removal=False, pca_n_components=300, pca_n_top_components=10,
use_mean_vec_for_special_tokens=True, n_special_tokens=5):
""" Normalize word embeddings as in https://arxiv.org/pdf/1808.06305.pdf
(e.g. used for S3E Pooling of sentence embeddings)
:param zero_mean: Whether to center embeddings via subtracting mean
:type zero_mean: bool
:param pca_removal: Whether to remove PCA components
:type pca_removal: bool
:param pca_n_components: Number of PCA components to use for fitting
:type pca_n_components: int
:param pca_n_top_components: Number of PCA components to remove
:type pca_n_top_components: int
:param use_mean_vec_for_special_tokens: Whether to replace embedding of special tokens with the mean embedding
:type use_mean_vec_for_special_tokens: bool
:param n_special_tokens: Number of special tokens like CLS, UNK etc. (used if `use_mean_vec_for_special_tokens`).
Note: We expect the special tokens to be the first `n_special_tokens` entries of the vocab.
:type n_special_tokens: int
:return: None
"""
if zero_mean:
logger.info('Removing mean from embeddings')
# self.model.embeddings[:n_special_tokens, :] = torch.zeros((n_special_tokens, 300))
mean_vec = torch.mean(self.model.embeddings, 0)
self.model.embeddings = self.model.embeddings - mean_vec
if use_mean_vec_for_special_tokens:
self.model.embeddings[:n_special_tokens, :] = mean_vec
if pca_removal:
from sklearn.decomposition import PCA
logger.info('Removing projections on top PCA components from embeddings (see https://arxiv.org/pdf/1808.06305.pdf)')
pca = PCA(n_components=pca_n_components)
pca.fit(self.model.embeddings.cpu().numpy())
U1 = pca.components_
explained_variance = pca.explained_variance_
# Removing projections on top components
PVN_dims = pca_n_top_components
for emb_idx in tqdm(range(self.model.embeddings.shape[0]), desc="Removing projections"):
for pca_idx, u in enumerate(U1[0:PVN_dims]):
ratio = (explained_variance[pca_idx] - explained_variance[PVN_dims]) / explained_variance[pca_idx]
self.model.embeddings[emb_idx] = self.model.embeddings[emb_idx] - ratio * np.dot(u.transpose(), self.model.embeddings[emb_idx]) * u
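# A compact standalone sketch of the normalization above: zero-mean the embedding matrix,
# then remove its projections on the top PCA components (per the paper cited in the
# docstring). This is the unweighted variant; the method above additionally scales the
# removal by an explained-variance ratio. Argument names are illustrative assumptions.
def _example_normalize_embeddings(embeddings, n_top_components=2):
    from sklearn.decomposition import PCA
    emb = embeddings - embeddings.mean(axis=0)          # zero-mean the embedding matrix
    pca = PCA(n_components=min(emb.shape)).fit(emb)
    top = pca.components_[:n_top_components]            # [n_top, dim]
    return emb - (emb @ top.T) @ top                    # strip projections on top components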
class Electra(LanguageModel):
"""
ELECTRA is a new pre-training approach which trains two transformer models:
the generator and the discriminator. The generator replaces tokens in a sequence,
and is therefore trained as a masked language model. The discriminator, which is
the model we're interested in, tries to identify which tokens were replaced by
the generator in the sequence.
The ELECTRA model here wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
NOTE:
- Electra does not output the pooled_output. An additional pooler is initialized.
"""
def __init__(self):
super(Electra, self).__init__()
self.model = None
self.name = "electra"
self.pooler = None
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a pretrained model by supplying
* the name of a remote model on s3 ("google/electra-base-discriminator" ...)
* OR a local path of a model trained via transformers ("some_dir/huggingface_model")
* OR a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: The path of the saved pretrained model or its name.
:type pretrained_model_name_or_path: str
"""
electra = cls()
if "farm_lm_name" in kwargs:
electra.name = kwargs["farm_lm_name"]
else:
electra.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = ElectraConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
electra.model = ElectraModel.from_pretrained(farm_lm_model, config=config, **kwargs)
electra.language = electra.model.config.language
else:
# Transformers Style
electra.model = ElectraModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
electra.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
config = electra.model.config
# ELECTRA does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.
# The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).
# We don't want dropout at the end of the pooler, since we already apply it in the adaptive model
# before feeding everything to the prediction head.
# Note: ELECTRA uses gelu as activation (BERT uses tanh instead)
config.summary_last_dropout = 0
config.summary_type = 'first'
config.summary_activation = 'gelu'
electra.pooler = SequenceSummary(config)
electra.pooler.apply(electra.model._init_weights)
return electra
def forward(
self,
input_ids,
segment_ids,
padding_mask,
**kwargs,
):
"""
Perform the forward pass of the ELECTRA model.
:param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]
:type input_ids: torch.Tensor
:param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens
of shape [batch_size, max_seq_len]
:return: Embeddings for each token in the input sequence.
"""
output_tuple = self.model(
input_ids,
token_type_ids=segment_ids,
attention_mask=padding_mask,
)
# We need to manually aggregate that to get a pooled output (one vec per seq)
pooled_output = self.pooler(output_tuple[0])
if self.model.config.output_hidden_states == True:
sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]
return sequence_output, pooled_output, all_hidden_states
else:
sequence_output = output_tuple[0]
return sequence_output, pooled_output
def enable_hidden_states_output(self):
self.model.config.output_hidden_states = True
def disable_hidden_states_output(self):
self.model.config.output_hidden_states = False
class Camembert(Roberta):
"""
A CamemBERT model that wraps HuggingFace's implementation
(https://github.com/huggingface/transformers) to fit the LanguageModel class.
"""
def __init__(self):
super(Camembert, self).__init__()
self.model = None
self.name = "camembert"
@classmethod
def load(cls, pretrained_model_name_or_path, language=None, **kwargs):
"""
Load a language model either by supplying
* the name of a remote model on s3 ("camembert-base" ...)
* or a local path of a model trained via transformers ("some_dir/huggingface_model")
* or a local path of a model trained via FARM ("some_dir/farm_model")
:param pretrained_model_name_or_path: name or path of a model
:param language: (Optional) Name of language the model was trained for (e.g. "german").
If not supplied, FARM will try to infer it from the model name.
:return: Language Model
"""
camembert = cls()
if "farm_lm_name" in kwargs:
camembert.name = kwargs["farm_lm_name"]
else:
camembert.name = pretrained_model_name_or_path
# We need to differentiate between loading model using FARM format and Pytorch-Transformers format
farm_lm_config = Path(pretrained_model_name_or_path) / "language_model_config.json"
if os.path.exists(farm_lm_config):
# FARM style
config = CamembertConfig.from_pretrained(farm_lm_config)
farm_lm_model = Path(pretrained_model_name_or_path) / "language_model.bin"
camembert.model = CamembertModel.from_pretrained(farm_lm_model, config=config, **kwargs)
camembert.language = camembert.model.config.language
else:
# Huggingface transformer Style
camembert.model = CamembertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)
camembert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)
return camembert
| [
"torch.stack",
"torch.from_numpy",
"torch.nn.BatchNorm1d",
"torch.mean"
] | 1.5.0 | bvanaken/FARM | 09767092457e73860c3a604b5060562c2004f03d |
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
r"""
The pyro.infer.autoguide.initialization module contains initialization functions for
automatic guides.
The standard interface for initialization is a function that inputs a Pyro
trace ``site`` dict and returns an appropriately sized ``value`` to serve
as an initial constrained value for a guide estimate.
"""
import torch
from torch.distributions import transform_to
from pyro.distributions.torch import Independent
from pyro.distributions.torch_distribution import MaskedDistribution
from pyro.infer.util import is_validation_enabled
from pyro.poutine.messenger import Messenger
from pyro.util import torch_isnan
def _is_multivariate(d):
while isinstance(d, (Independent, MaskedDistribution)):
d = d.base_dist
return any(size > 1 for size in d.event_shape)
def init_to_feasible(site):
"""
Initialize to an arbitrary feasible point, ignoring distribution
parameters.
"""
value = site["fn"].sample().detach()
t = transform_to(site["fn"].support)
return t(torch.zeros_like(t.inv(value)))
def init_to_sample(site):
"""
Initialize to a random sample from the prior.
"""
return site["fn"].sample().detach()
def init_to_median(site, num_samples=15):
"""
Initialize to the prior median; fall back to a feasible point if the median is
undefined.
"""
# The median is undefined for multivariate distributions.
if _is_multivariate(site["fn"]):
return init_to_feasible(site)
try:
# Try to compute empirical median.
samples = site["fn"].sample(sample_shape=(num_samples,))
value = samples.median(dim=0)[0]
if torch_isnan(value):
raise ValueError
if hasattr(site["fn"], "_validate_sample"):
site["fn"]._validate_sample(value)
return value
except (RuntimeError, ValueError):
# Fall back to feasible point.
return init_to_feasible(site)
def init_to_mean(site):
"""
Initialize to the prior mean; fall back to the median if the mean is undefined.
"""
try:
# Try .mean() method.
value = site["fn"].mean.detach()
if torch_isnan(value):
raise ValueError
if hasattr(site["fn"], "_validate_sample"):
site["fn"]._validate_sample(value)
return value
except (NotImplementedError, ValueError):
# Fall back to the median.
# This is required for distributions with infinite variance, e.g. Cauchy.
return init_to_median(site)
class InitMessenger(Messenger):
"""
Initializes a site by replacing ``.sample()`` calls with values
drawn from an initialization strategy. This is mainly for internal use by
autoguide classes.
:param callable init_fn: An initialization function.
"""
def __init__(self, init_fn):
self.init_fn = init_fn
super(InitMessenger, self).__init__()
def _pyro_sample(self, msg):
if msg["done"] or msg["is_observed"] or type(msg["fn"]).__name__ == "_Subsample":
return
with torch.no_grad():
value = self.init_fn(msg)
if is_validation_enabled() and msg["value"] is not None:
if not isinstance(value, type(msg["value"])):
raise ValueError(
"{} provided invalid type for site {}:\nexpected {}\nactual {}"
.format(self.init_fn, msg["name"], type(msg["value"]), type(value)))
if value.shape != msg["value"].shape:
raise ValueError(
"{} provided invalid shape for site {}:\nexpected {}\nactual {}"
.format(self.init_fn, msg["name"], msg["value"].shape, value.shape))
msg["value"] = value
msg["done"] = True
| [
"torch.no_grad",
"torch.distributions.transform_to"
] | 1.4.0 | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 |
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
An example of using Pyro's Gaussian Process module to classify MNIST and binary MNIST.
Following the idea from reference [1], we combine a convolutional neural network
(CNN) with an RBF kernel to create a "deep" kernel:
>>> deep_kernel = gp.kernels.Warping(rbf, iwarping_fn=cnn)
The VariationalSparseGP model allows us to train on the data in mini-batches (time complexity
scales linearly with the number of data points).
Note that the implementation here is different from [1]. There, the authors
use the CNN as a feature extraction layer and then add a Gaussian Process layer on
top of the CNN. Hence, their inducing points lie in the space of extracted features.
Here we join the CNN module and the RBF kernel together into a single deep kernel.
Hence, our inducing points lie in the space of the original images.
After 16 epochs with default hyperparameters, the accuracy on 10-class MNIST
is 98.45% and the accuracy on binary MNIST is 99.41%.
Reference:
[1] Stochastic Variational Deep Kernel Learning
Andrew G. Wilson, Zhiting Hu, Ruslan R. Salakhutdinov, Eric P. Xing
"""
# Code adapted from https://github.com/pytorch/examples/tree/master/mnist
import argparse
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import pyro
import pyro.contrib.gp as gp
import pyro.infer as infer
from pyro.contrib.examples.util import get_data_loader, get_data_directory
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def train(args, train_loader, gpmodule, optimizer, loss_fn, epoch):
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
if args.binary:
target = (target % 2).float() # convert numbers 0->9 to 0 or 1
gpmodule.set_data(data, target)
optimizer.zero_grad()
loss = loss_fn(gpmodule.model, gpmodule.guide)
loss.backward()
optimizer.step()
batch_idx = batch_idx + 1
if batch_idx % args.log_interval == 0:
print("Train Epoch: {:2d} [{:5d}/{} ({:2.0f}%)]\tLoss: {:.6f}"
.format(epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss))
def test(args, test_loader, gpmodule):
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
if args.binary:
target = (target % 2).float() # convert numbers 0->9 to 0 or 1
# get prediction of GP model on new data
f_loc, f_var = gpmodule(data)
# use its likelihood to give prediction class
pred = gpmodule.likelihood(f_loc, f_var)
# compare prediction and target to count accuracy
correct += pred.eq(target).long().cpu().sum().item()
print("\nTest set: Accuracy: {}/{} ({:.2f}%)\n"
.format(correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
def main(args):
data_dir = args.data_dir if args.data_dir is not None else get_data_directory(__file__)
train_loader = get_data_loader(dataset_name='MNIST',
data_dir=data_dir,
batch_size=args.batch_size,
dataset_transforms=[transforms.Normalize((0.1307,), (0.3081,))],
is_training_set=True,
shuffle=True)
test_loader = get_data_loader(dataset_name='MNIST',
data_dir=data_dir,
batch_size=args.test_batch_size,
dataset_transforms=[transforms.Normalize((0.1307,), (0.3081,))],
is_training_set=False,
shuffle=False)
if args.cuda:
train_loader.num_workers = 1
test_loader.num_workers = 1
cnn = CNN()
# Create deep kernel by warping RBF with CNN.
# The CNN transforms a high-dimensional image into a low-dimensional 2D tensor for the RBF kernel.
# This kernel takes the same inputs as the CNN and returns the RBF covariance matrix
# computed on the CNN's outputs.
rbf = gp.kernels.RBF(input_dim=10, lengthscale=torch.ones(10))
deep_kernel = gp.kernels.Warping(rbf, iwarping_fn=cnn)
# init inducing points (taken randomly from dataset)
batches = []
for i, (data, _) in enumerate(train_loader):
batches.append(data)
if i >= ((args.num_inducing - 1) // args.batch_size):
break
Xu = torch.cat(batches)[:args.num_inducing].clone()
if args.binary:
likelihood = gp.likelihoods.Binary()
latent_shape = torch.Size([])
else:
# use MultiClass likelihood for 10-class classification problem
likelihood = gp.likelihoods.MultiClass(num_classes=10)
# Because we use a Categorical distribution in the MultiClass likelihood, the GP model
# needs to return probabilities for each class. Hence we set
# latent_shape = 10.
latent_shape = torch.Size([10])
# Turns on "whiten" flag will help optimization for variational models.
gpmodule = gp.models.VariationalSparseGP(X=Xu, y=None, kernel=deep_kernel, Xu=Xu,
likelihood=likelihood, latent_shape=latent_shape,
num_data=60000, whiten=True, jitter=2e-6)
if args.cuda:
gpmodule.cuda()
optimizer = torch.optim.Adam(gpmodule.parameters(), lr=args.lr)
elbo = infer.JitTraceMeanField_ELBO() if args.jit else infer.TraceMeanField_ELBO()
loss_fn = elbo.differentiable_loss
for epoch in range(1, args.epochs + 1):
start_time = time.time()
train(args, train_loader, gpmodule, optimizer, loss_fn, epoch)
with torch.no_grad():
test(args, test_loader, gpmodule)
print("Amount of time spent for epoch {}: {}s\n"
.format(epoch, int(time.time() - start_time)))
if __name__ == '__main__':
assert pyro.__version__.startswith('1.1.0')
parser = argparse.ArgumentParser(description='Pyro GP MNIST Example')
parser.add_argument('--data-dir', type=str, default=None, metavar='PATH',
help='default directory to cache MNIST data')
parser.add_argument('--num-inducing', type=int, default=70, metavar='N',
help='number of inducing input (default: 70)')
parser.add_argument('--binary', action='store_true', default=False,
help='do binary classification')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--jit', action='store_true', default=False,
help='enables PyTorch jit')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
pyro.set_rng_seed(args.seed)
if args.cuda:
torch.backends.cudnn.deterministic = True
main(args)
| [
"torch.nn.Linear",
"torch.Size",
"torch.cat",
"torch.no_grad",
"torch.ones",
"torch.nn.Conv2d"
] | 1.4.0 | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 |
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import torch
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.autoname import named
from pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO
from pyro.optim import Adam
# This is a simple Gaussian mixture model.
#
# The example demonstrates how to pass named.Objects() from a global model to
# a local model implemented as a helper function.
def model(data, k):
latent = named.Object("latent")
# Create parameters for a Gaussian mixture model.
latent.probs.param_(torch.ones(k) / k, constraint=constraints.simplex)
latent.locs.param_(torch.zeros(k))
latent.scales.param_(torch.ones(k), constraint=constraints.positive)
# Observe all the data. We pass a local latent in to the local_model.
latent.local = named.List()
for x in data:
local_model(latent.local.add(), latent.probs, latent.locs, latent.scales, obs=x)
def local_model(latent, ps, locs, scales, obs=None):
i = latent.id.sample_(dist.Categorical(ps))
return latent.x.sample_(dist.Normal(locs[i], scales[i]), obs=obs)
def guide(data, k):
latent = named.Object("latent")
latent.local = named.List()
for x in data:
# We pass a local latent in to the local_guide.
local_guide(latent.local.add(), k)
def local_guide(latent, k):
# The local guide simply guesses category assignments.
latent.probs.param_(torch.ones(k) / k, constraint=constraints.positive)
latent.id.sample_(dist.Categorical(latent.probs))
def main(args):
pyro.set_rng_seed(0)
pyro.enable_validation(__debug__)
optim = Adam({"lr": 0.1})
elbo = JitTrace_ELBO() if args.jit else Trace_ELBO()
inference = SVI(model, guide, optim, loss=elbo)
data = torch.tensor([0.0, 1.0, 2.0, 20.0, 30.0, 40.0])
k = 2
print('Step\tLoss')
loss = 0.0
for step in range(args.num_epochs):
if step and step % 10 == 0:
print('{}\t{:0.5g}'.format(step, loss))
loss = 0.0
loss += inference.step(data, k=k)
print('Parameters:')
for name, value in sorted(pyro.get_param_store().items()):
print('{} = {}'.format(name, value.detach().cpu().numpy()))
if __name__ == '__main__':
assert pyro.__version__.startswith('1.1.0')
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument('-n', '--num-epochs', default=200, type=int)
parser.add_argument('--jit', action='store_true')
args = parser.parse_args()
main(args)
| [
"torch.zeros",
"torch.tensor",
"torch.ones"
] | 1.4.0 | ludkinm/pyro | d24c808a9d86d79c43a99990fe9e418ce5976613 |
1.8 | import torch
import torch.nn as nn
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.shared_encoder = torch.nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten()
)
self.private_encoder = torch.nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten()
)
self.clf = torch.nn.Sequential(
torch.nn.Linear(64*7*7*2, 512), # multiplied by 2 because global_feat and local_feat are concatenated
torch.nn.ReLU(inplace=True),
torch.nn.Linear(512, 10)
)
def forward(self, x):
gFeature = self.shared_encoder(x)
lFeature = self.private_encoder(x)
feature = torch.cat((gFeature, lFeature), dim=-1)
output = self.clf(feature)
return output
if __name__ == '__main__':
model = MNIST()
_x = torch.rand((50, 1, 28, 28))
_output = model(_x)
print(f'{_x.shape}->{_output.shape}')
print("Parameters in total {}".format(sum(x.numel() for x in model.parameters())))
print("Comm.")
total = 0
for key, param in model.named_parameters():
if key.startswith('shared'):
total += param.numel()
print("Comm. Parameters {}".format(total)) | [
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Flatten"
] | 1.8.1 | tdye24/LightningFL | 48bb4a452082411e051cdb3a2e98ede6bbc91bbf |
1.7 | import torch
def rel_positions_grid(grid_sizes):
"""Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.
sidelen: int
dim: int
"""
tensors = []
for size in grid_sizes:
tensors.append(torch.linspace(-1, 1, steps=size))
# tensors = tuple(dim * [torch.linspace(-1, 1, steps=grid_length)])
relpos_grid = torch.stack(torch.meshgrid(*tensors), dim=0)
return relpos_grid
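# A small usage sketch, assuming the function above is used as-is: for a 4x4 grid the
# x and y coordinate planes are stacked along dim 0, giving values in [-1, 1].
if __name__ == "__main__":
    grid = rel_positions_grid((4, 4))
    print(grid.shape)  # torch.Size([2, 4, 4])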
| [
"torch.meshgrid",
"torch.linspace"
] | 1.7.1 | boczekbartek/flexconv | 610b5be3a846bcc1436275daaad89482b6b8e7cc |