Dataset schema (one row per source file):
  repo            — string (length 1 to 99)
  file            — string (length 13 to 215)
  code            — string (length 12 to 59.2M)
  file_length     — int64 (12 to 59.2M)
  avg_line_length — float64 (3.82 to 1.48M)
  max_line_length — int64 (12 to 2.51M)
  extension_type  — string (1 distinct value)
RegularizedBN
RegularizedBN-main/examples/speech_recognition/data/asr_dataset.py
```python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os

import numpy as np

from fairseq.data import FairseqDataset

from . import data_utils
from .collaters import Seq2SeqCollater


class AsrDataset(FairseqDataset):
    """
    A dataset representing speech and corresponding transcription.

    Args:
        aud_paths (List[str]): A list of str with paths to audio files.
        aud_durations_ms (List[int]): A list of int containing the durations of
            audio files.
        tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
            of target transcriptions.
        tgt_dict (~fairseq.data.Dictionary): target vocabulary.
        ids (List[str]): A list of utterance IDs.
        speakers (List[str]): A list of speakers corresponding to utterances.
        num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
        frame_length (float): Frame length in milliseconds (default: 25.0)
        frame_shift (float): Frame shift in milliseconds (default: 10.0)
    """

    def __init__(
        self, aud_paths, aud_durations_ms, tgt,
        tgt_dict, ids, speakers,
        num_mel_bins=80, frame_length=25.0, frame_shift=10.0
    ):
        assert frame_length > 0
        assert frame_shift > 0
        assert all(x > frame_length for x in aud_durations_ms)
        self.frame_sizes = [
            int(1 + (d - frame_length) / frame_shift)
            for d in aud_durations_ms
        ]

        assert len(aud_paths) > 0
        assert len(aud_paths) == len(aud_durations_ms)
        assert len(aud_paths) == len(tgt)
        assert len(aud_paths) == len(ids)
        assert len(aud_paths) == len(speakers)
        self.aud_paths = aud_paths
        self.tgt_dict = tgt_dict
        self.tgt = tgt
        self.ids = ids
        self.speakers = speakers
        self.num_mel_bins = num_mel_bins
        self.frame_length = frame_length
        self.frame_shift = frame_shift

        self.s2s_collater = Seq2SeqCollater(
            0, 1, pad_index=self.tgt_dict.pad(),
            eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
        )

    def __getitem__(self, index):
        import torchaudio
        import torchaudio.compliance.kaldi as kaldi
        tgt_item = self.tgt[index] if self.tgt is not None else None

        path = self.aud_paths[index]
        if not os.path.exists(path):
            raise FileNotFoundError("Audio file not found: {}".format(path))
        sound, sample_rate = torchaudio.load_wav(path)
        output = kaldi.fbank(
            sound,
            num_mel_bins=self.num_mel_bins,
            frame_length=self.frame_length,
            frame_shift=self.frame_shift
        )
        output_cmvn = data_utils.apply_mv_norm(output)

        return {"id": index, "data": [output_cmvn.detach(), tgt_item]}

    def __len__(self):
        return len(self.aud_paths)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[int]): sample indices to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        return self.s2s_collater.collate(samples)

    def num_tokens(self, index):
        return self.frame_sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return (
            self.frame_sizes[index],
            len(self.tgt[index]) if self.tgt is not None else 0,
        )

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        return np.arange(len(self))
```
file_length: 3,870 | avg_line_length: 33.5625 | max_line_length: 82 | extension_type: py
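The constructor above derives per-utterance frame counts from audio durations with the standard framing formula, frames = 1 + (duration_ms − frame_length) / frame_shift. A minimal, self-contained sketch of that calculation (plain Python, no fairseq or torchaudio required; the durations below are made up for illustration):

```python
# Frame-count calculation used by AsrDataset.__init__ above:
# with 25 ms windows shifted by 10 ms, an utterance of d milliseconds
# yields 1 + (d - 25) / 10 frames (integer-truncated).
def num_frames(duration_ms, frame_length=25.0, frame_shift=10.0):
    assert duration_ms > frame_length
    return int(1 + (duration_ms - frame_length) / frame_shift)

# Hypothetical durations, for illustration only.
durations_ms = [170, 1031, 2500]
print([num_frames(d) for d in durations_ms])  # [15, 101, 248]
```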
RegularizedBN
RegularizedBN-main/examples/speech_recognition/tasks/speech_recognition.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import re import sys import torch from fairseq.data import Dictionary from fairseq.tasks import FairseqTask, register_task from examples.speech_recognition.data import AsrDataset from examples.speech_recognition.data.replabels import replabel_symbol def get_asr_dataset_from_json(data_json_path, tgt_dict): """ Parse data json and create dataset. See scripts/asr_prep_json.py which pack json from raw files Json example: { "utts": { "4771-29403-0025": { "input": { "length_ms": 170, "path": "/tmp/file1.flac" }, "output": { "text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861" } }, "1564-142299-0096": { ... } } """ if not os.path.isfile(data_json_path): raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) with open(data_json_path, "rb") as f: data_samples = json.load(f)["utts"] assert len(data_samples) != 0 sorted_samples = sorted( data_samples.items(), key=lambda sample: int(sample[1]["input"]["length_ms"]), reverse=True, ) aud_paths = [s[1]["input"]["path"] for s in sorted_samples] ids = [s[0] for s in sorted_samples] speakers = [] for s in sorted_samples: m = re.search("(.+?)-(.+?)-(.+?)", s[0]) speakers.append(m.group(1) + "_" + m.group(2)) frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] tgt = [ [int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples ] # append eos tgt = [[*t, tgt_dict.eos()] for t in tgt] return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) @register_task("speech_recognition") class SpeechRecognitionTask(FairseqTask): """ Task for training speech recognition model. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", help="path to data directory") parser.add_argument( "--silence-token", default="\u2581", help="token for silence (used by w2l)" ) parser.add_argument('--max-source-positions', default=sys.maxsize, type=int, metavar='N', help='max number of frames in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries).""" dict_path = os.path.join(args.data, "dict.txt") if not os.path.isfile(dict_path): raise FileNotFoundError("Dict not found: {}".format(dict_path)) tgt_dict = Dictionary.load(dict_path) if args.criterion == "ctc_loss": tgt_dict.add_symbol("<ctc_blank>") elif args.criterion == "asg_loss": for i in range(1, args.max_replabel + 1): tgt_dict.add_symbol(replabel_symbol(i)) print("| dictionary: {} types".format(len(tgt_dict))) return cls(args, tgt_dict) def load_dataset(self, split, combine=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ data_json_path = os.path.join(self.args.data, "{}.json".format(split)) self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) def build_generator(self, models, args): w2l_decoder = getattr(args, "w2l_decoder", None) if w2l_decoder == "viterbi": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, self.target_dictionary) elif w2l_decoder == "kenlm": from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder return W2lKenLMDecoder(args, self.target_dictionary) else: return super().build_generator(models, args) @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.tgt_dict @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" return None def max_positions(self): """Return the max speech and sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions)
file_length: 5,094 | avg_line_length: 34.381944 | max_line_length: 97 | extension_type: py
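get_asr_dataset_from_json above sorts utterances by length_ms (longest first), splits the comma-separated tokenid field into integers, appends EOS, and derives a speaker id from the first two fields of the utterance id. A small sketch of that parsing on an in-memory dict shaped like the documented JSON; no fairseq Dictionary is used, and the EOS index 2 is a stand-in:

```python
# JSON layout documented in the task above (values are illustrative).
data = {
    "utts": {
        "4771-29403-0025": {
            "input": {"length_ms": 170, "path": "/tmp/file1.flac"},
            "output": {"text": "HELLO \n", "token": "HE LLO", "tokenid": "4815, 861"},
        },
        "1564-142299-0096": {
            "input": {"length_ms": 90, "path": "/tmp/file2.flac"},
            "output": {"text": "HI \n", "token": "HI", "tokenid": "512"},
        },
    }
}

EOS = 2  # stand-in for tgt_dict.eos()
samples = sorted(
    data["utts"].items(),
    key=lambda kv: int(kv[1]["input"]["length_ms"]),
    reverse=True,
)
aud_paths = [s[1]["input"]["path"] for s in samples]
tgt = [[int(i) for i in s[1]["output"]["tokenid"].split(", ")] + [EOS] for s in samples]
speakers = ["_".join(s[0].split("-")[:2]) for s in samples]
print(aud_paths, tgt, speakers)
```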
RegularizedBN
RegularizedBN-main/examples/byte_level_bpe/gru_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel, TransformerEncoder @register_model("gru_transformer") class GRUTransformerModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return GRUTransformerEncoder(args, src_dict, embed_tokens) class GRUTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.emb_ctx = nn.GRU(input_size=embed_tokens.embedding_dim, hidden_size=embed_tokens.embedding_dim // 2, num_layers=1, bidirectional=True) def forward_embedding(self, src_tokens): # embed tokens and positions x = embed = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) # contextualize embeddings x = x.transpose(0, 1) x = self.dropout_module(x) x, _ = self.emb_ctx.forward(x) x = x.transpose(0, 1) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) return x, embed @register_model_architecture("gru_transformer", "gru_transformer") def gru_transformer_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.layer_wise_attention = getattr(args, 
"layer_wise_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) @register_model_architecture("gru_transformer", "gru_transformer_big") def gru_transformer_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) gru_transformer_base_architecture(args)
file_length: 5,028 | avg_line_length: 46.895238 | max_line_length: 87 | extension_type: py
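The encoder above contextualizes token embeddings with a bidirectional GRU whose hidden size is half the embedding dimension, so the concatenated forward/backward outputs keep the model width unchanged. A standalone shape check of that idea (plain PyTorch; the sizes are made up):

```python
import torch
import torch.nn as nn

embed_dim, seq_len, batch = 512, 7, 3
emb_ctx = nn.GRU(
    input_size=embed_dim,
    hidden_size=embed_dim // 2,  # halved so the bidirectional output width == embed_dim
    num_layers=1,
    bidirectional=True,
)

x = torch.randn(batch, seq_len, embed_dim)   # B x T x C, as in forward_embedding
x = x.transpose(0, 1)                        # nn.GRU expects T x B x C by default
out, _ = emb_ctx(x)
out = out.transpose(0, 1)
print(out.shape)  # torch.Size([3, 7, 512]) — same width as the input embeddings
```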
RegularizedBN
RegularizedBN-main/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F import torch.nn as nn from fairseq import utils from fairseq.modules import MultiheadAttention from examples.simultaneous_translation.utils.functions import ( exclusive_cumprod, lengths_to_mask ) from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.utils import convert_padding_direction from . import register_monotonic_attention @with_incremental_state class MonotonicAttention(nn.Module): """ Abstract class of monotonic attentions """ def __init__(self, args): self.eps = args.attention_eps self.mass_preservation = args.mass_preservation self.noise_mean = args.noise_mean self.noise_var = args.noise_var self.energy_bias_init = args.energy_bias_init self.energy_bias = ( nn.Parameter(self.energy_bias_init * torch.ones([1])) if args.energy_bias is True else 0 ) @staticmethod def add_args(parser): # fmt: off parser.add_argument('--no-mass-preservation', action="store_false", dest="mass_preservation", help='Do not stay on the last token when decoding') parser.add_argument('--mass-preservation', action="store_true", dest="mass_preservation", help='Stay on the last token when decoding') parser.set_defaults(mass_preservation=True) parser.add_argument('--noise-var', type=float, default=1.0, help='Variance of discretness noise') parser.add_argument('--noise-mean', type=float, default=0.0, help='Mean of discretness noise') parser.add_argument('--energy-bias', action="store_true", default=False, help='Bias for energy') parser.add_argument('--energy-bias-init', type=float, default=-2.0, help='Initial value of the bias for energy') parser.add_argument('--attention-eps', type=float, default=1e-6, help='Epsilon when calculating expected attention') # fmt: on def p_choose(self, *args): raise NotImplementedError def input_projections(self, *args): raise NotImplementedError def attn_energy(self, q_proj, k_proj, key_padding_mask=None): """ Calculating monotonic energies ============================================================ Expected input size q_proj: bsz * num_heads, tgt_len, self.head_dim k_proj: bsz * num_heads, src_len, self.head_dim key_padding_mask: bsz, src_len attn_mask: tgt_len, src_len """ bsz, tgt_len, embed_dim = q_proj.size() bsz = bsz // self.num_heads src_len = k_proj.size(1) attn_energy = torch.bmm(q_proj, k_proj.transpose(1, 2)) + self.energy_bias attn_energy = attn_energy.view(bsz, self.num_heads, tgt_len, src_len) if key_padding_mask is not None: attn_energy = attn_energy.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), float('-inf'), ) return attn_energy def expected_alignment_train(self, p_choose, key_padding_mask): """ Calculating expected alignment for MMA Mask is not need because p_choose will be 0 if masked q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} a_ij = p_ij q_ij parellel solution: ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi)) ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len """ # p_choose: bsz * num_heads, tgt_len, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # cumprod_1mp : bsz * num_heads, tgt_len, src_len cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=self.eps) cumprod_1mp_clamp = torch.clamp(cumprod_1mp, self.eps, 1.0) init_attention = p_choose.new_zeros([bsz_num_heads, 1, src_len]) 
init_attention[:, :, 0] = 1.0 previous_attn = [init_attention] for i in range(tgt_len): # p_choose: bsz * num_heads, tgt_len, src_len # cumprod_1mp_clamp : bsz * num_heads, tgt_len, src_len # previous_attn[i]: bsz * num_heads, 1, src_len # alpha_i: bsz * num_heads, src_len alpha_i = ( p_choose[:, i] * cumprod_1mp[:, i] * torch.cumsum( previous_attn[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1 ) ).clamp(0, 1.0) previous_attn.append(alpha_i.unsqueeze(1)) # alpha: bsz * num_heads, tgt_len, src_len alpha = torch.cat(previous_attn[1:], dim=1) if self.mass_preservation: # Last token has the residual probabilities alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0.0, 1.0) assert not torch.isnan(alpha).any(), "NaN detected in alpha." return alpha def expected_alignment_infer(self, p_choose, key_padding_mask, incremental_state): """ Calculating mo alignment for MMA during inference time ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len key_padding_mask: bsz * src_len incremental_state: dict """ # p_choose: bsz * self.num_heads, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # One token at a time assert tgt_len == 1 p_choose = p_choose[:, 0, :] monotonic_cache = self._get_monotonic_buffer(incremental_state) # prev_monotonic_step: bsz, num_heads bsz = bsz_num_heads // self.num_heads prev_monotonic_step = monotonic_cache.get( "step", p_choose.new_zeros([bsz, self.num_heads]).long() ) bsz, num_heads = prev_monotonic_step.size() assert num_heads == self.num_heads assert bsz * num_heads == bsz_num_heads # p_choose: bsz, num_heads, src_len p_choose = p_choose.view(bsz, num_heads, src_len) if key_padding_mask is not None: src_lengths = src_len - \ key_padding_mask.sum(dim=1, keepdim=True).long() else: src_lengths = prev_monotonic_step.new_ones(bsz, 1) * src_len # src_lengths: bsz, num_heads src_lengths = src_lengths.expand_as(prev_monotonic_step) # new_monotonic_step: bsz, num_heads new_monotonic_step = prev_monotonic_step step_offset = 0 if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = key_padding_mask.sum(dim=-1, keepdim=True) max_steps = ( src_lengths - 1 if self.mass_preservation else src_lengths ) # finish_read: bsz, num_heads finish_read = new_monotonic_step.eq(max_steps) while finish_read.sum().item() < bsz * self.num_heads: # p_choose: bsz * self.num_heads, src_len # only choose the p at monotonic steps # p_choose_i: bsz , self.num_heads p_choose_i = ( p_choose .gather( 2, (step_offset + new_monotonic_step).unsqueeze(2) .clamp(0, src_len - 1) ) ).squeeze(2) action = ( (p_choose_i < 0.5) .type_as(prev_monotonic_step) .masked_fill(finish_read, 0) ) # 1 x bsz # sample actions on unfinished seq # 1 means stay, finish reading # 0 means leave, continue reading # dist = torch.distributions.bernoulli.Bernoulli(p_choose) # action = dist.sample().type_as(finish_read) * (1 - finish_read) new_monotonic_step += action finish_read = new_monotonic_step.eq(max_steps) | (action == 0) # finish_read = (~ (finish_read.sum(dim=1, keepdim=True) < self.num_heads / 2)) | finish_read monotonic_cache["step"] = new_monotonic_step # alpha: bsz * num_heads, 1, src_len # new_monotonic_step: bsz, num_heads alpha = ( p_choose .new_zeros([bsz * self.num_heads, src_len]) .scatter( 1, (step_offset + new_monotonic_step).view(bsz * self.num_heads, 1).clamp(0, src_len - 1), 1 ) ) if not self.mass_preservation: alpha = alpha.masked_fill( (new_monotonic_step == max_steps).view(bsz * 
self.num_heads, 1), 0 ) alpha = alpha.unsqueeze(1) self._set_monotonic_buffer(incremental_state, monotonic_cache) return alpha def v_proj_output(self, value): raise NotImplementedError def forward( self, query, key, value, key_padding_mask=None, incremental_state=None, *args, **kwargs, ): tgt_len, bsz, embed_dim = query.size() src_len = value.size(0) # stepwise prob # p_choose: bsz * self.num_heads, tgt_len, src_len p_choose = self.p_choose(query, key, key_padding_mask) # expected alignment alpha # bsz * self.num_heads, tgt_len, src_len if incremental_state is not None: alpha = self.expected_alignment_infer(p_choose, key_padding_mask, incremental_state) else: alpha = self.expected_alignment_train(p_choose, key_padding_mask) # expected attention beta # bsz * self.num_heads, tgt_len, src_len beta = self.expected_attention(alpha, query, key, value, key_padding_mask, incremental_state) attn_weights = beta v_proj = self.v_proj_output(value) attn = torch.bmm(attn_weights.type_as(v_proj), v_proj) attn = ( attn .transpose(0, 1) .contiguous() .view(tgt_len, bsz, embed_dim) ) attn = self.out_proj(attn) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len) p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len) return attn, {"alpha": alpha, "beta": beta, "p_choose": p_choose} def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" super().reorder_incremental_state(incremental_state, new_order) input_buffer = self._get_monotonic_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(0, new_order) self._set_monotonic_buffer(incremental_state, input_buffer) def _get_monotonic_buffer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'monotonic', ) or {} def _set_monotonic_buffer(self, incremental_state, buffer): utils.set_incremental_state( self, incremental_state, 'monotonic', buffer, ) def get_pointer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'monotonic', ) or {} def get_fastest_pointer(self, incremental_state): return self.get_pointer(incremental_state)["step"].max(0)[0] def set_pointer(self, incremental_state, p_choose): curr_pointer = self.get_pointer(incremental_state) if len(curr_pointer) == 0: buffer = torch.zeros_like(p_choose) else: buffer = self.get_pointer(incremental_state)["step"] buffer += (p_choose < 0.5).type_as(buffer) utils.set_incremental_state( self, incremental_state, 'monotonic', {"step": buffer}, ) @register_monotonic_attention("hard_aligned") class MonotonicMultiheadAttentionHard(MonotonicAttention, MultiheadAttention): def __init__(self, args): MultiheadAttention.__init__( self, embed_dim=args.decoder_embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True ) MonotonicAttention.__init__(self, args) self.k_in_proj = {"monotonic": self.k_proj} self.q_in_proj = {"monotonic": self.q_proj} self.v_in_proj = {"output": self.v_proj} def input_projections(self, query, key, value, name): """ Prepare inputs for multihead attention ============================================================ Expected input size query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim value: src_len, bsz, embed_dim name: monotonic or soft """ if query is 
not None: bsz = query.size(1) q = self.q_in_proj[name](query) q *= self.scaling q = q.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: q = None if key is not None: bsz = key.size(1) k = self.k_in_proj[name](key) k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: k = None if value is not None: bsz = value.size(1) v = self.v_in_proj[name](value) v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: v = None return q, k, v def p_choose(self, query, key, key_padding_mask=None): """ Calculating step wise prob for reading and writing 1 to read, 0 to write ============================================================ Expected input size query: bsz, tgt_len, embed_dim key: bsz, src_len, embed_dim value: bsz, src_len, embed_dim key_padding_mask: bsz, src_len attn_mask: bsz, src_len query: bsz, tgt_len, embed_dim """ # prepare inputs q_proj, k_proj, _ = self.input_projections(query, key, None, "monotonic") # attention energy attn_energy = self.attn_energy(q_proj, k_proj, key_padding_mask) noise = 0 if self.training: # add noise here to encourage discretness noise = ( torch .normal(self.noise_mean, self.noise_var, attn_energy.size()) .type_as(attn_energy) .to(attn_energy.device) ) p_choose = torch.sigmoid(attn_energy + noise) _, _, tgt_len, src_len = p_choose.size() # p_choose: bsz * self.num_heads, tgt_len, src_len return p_choose.view(-1, tgt_len, src_len) def expected_attention(self, alpha, *args): ''' For MMA-H, beta = alpha ''' return alpha def v_proj_output(self, value): _, _, v_proj = self.input_projections(None, None, value, "output") return v_proj @register_monotonic_attention("infinite_lookback") class MonotonicMultiheadAttentionInfiniteLookback(MonotonicMultiheadAttentionHard): def __init__(self, args): super().__init__(args) self.init_soft_attention() def init_soft_attention(self): self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True) self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.k_in_proj["soft"] = self.k_proj_soft self.q_in_proj["soft"] = self.q_proj_soft if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_in_proj["soft"].weight) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight) def expected_attention(self, alpha, query, key, value, key_padding_mask, incremental_state): # monotonic attention, we will calculate milk here bsz_x_num_heads, tgt_len, src_len = alpha.size() bsz = int(bsz_x_num_heads / self.num_heads) q, k, _ = self.input_projections(query, key, None, "soft") soft_energy = self.attn_energy(q, k, key_padding_mask) assert list(soft_energy.size()) == [bsz, self.num_heads, tgt_len, src_len] soft_energy = soft_energy.view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: monotonic_cache = self._get_monotonic_buffer(incremental_state) monotonic_step = monotonic_cache["step"] + 1 step_offset = 0 if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = key_padding_mask.sum(dim=-1, keepdim=True) monotonic_step += step_offset mask = lengths_to_mask( monotonic_step.view(-1), soft_energy.size(2), 1).unsqueeze(1) soft_energy = soft_energy.masked_fill(~ mask.bool(), float('-inf')) soft_energy = soft_energy - 
soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) exp_soft_energy_sum = exp_soft_energy.sum(dim=2) beta = exp_soft_energy / exp_soft_energy_sum.unsqueeze(2) else: # bsz * num_heads, tgt_len, src_len soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) exp_soft_energy_cumsum = torch.cumsum(exp_soft_energy, dim=2) if key_padding_mask is not None: if key_padding_mask.any(): exp_soft_energy_cumsum = ( exp_soft_energy_cumsum.view(-1, self.num_heads, tgt_len, src_len) .masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(1), self.eps) .view(-1, tgt_len, src_len) ) inner_items = alpha / exp_soft_energy_cumsum beta = exp_soft_energy * torch.cumsum(inner_items.flip(dims=[2]), dim=2).flip(dims=[2]) beta = self.dropout_module(beta) assert not torch.isnan(beta).any(), "NaN detected in beta." return beta @register_monotonic_attention("waitk") class MonotonicMultiheadAttentionWaitk(MonotonicMultiheadAttentionInfiniteLookback): def __init__(self, args): super().__init__(args) self.q_in_proj["soft"] = self.q_in_proj["monotonic"] self.k_in_proj["soft"] = self.k_in_proj["monotonic"] self.waitk_lagging = args.waitk_lagging assert self.waitk_lagging > 0, f"Lagging has to been larger than 0, get {self.waitk_lagging}." @staticmethod def add_args(parser): super( MonotonicMultiheadAttentionWaitk, MonotonicMultiheadAttentionWaitk, ).add_args(parser) parser.add_argument('--waitk-lagging', type=int, required=True, help='Wait k lagging') def p_choose(self, query, key, key_padding_mask=None, attn_mask=None, incremental_state=None): """ query: bsz, tgt_len key: bsz, src_len key_padding_mask: bsz, src_len """ src_len, bsz, _ = key.size() tgt_len, bsz, _ = query.size() p_choose = query.new_ones(bsz, tgt_len, src_len) p_choose = torch.tril(p_choose, diagonal=self.waitk_lagging - 1) p_choose = torch.triu(p_choose, diagonal=self.waitk_lagging - 1) if key_padding_mask is not None and key_padding_mask[:, 0].eq(1).any(): # Left pad source # add -1 to the end p_choose = p_choose.masked_fill(key_padding_mask.float().flip(1).unsqueeze(1).bool(), -1) p_choose = convert_padding_direction(p_choose.view(-1, src_len).long(), padding_idx=-1, right_to_left=True) p_choose = p_choose.view(bsz, tgt_len, src_len).type_as(query) # remove -1 p_choose[p_choose.eq(-1)] = 0 # Extend to each head p_choose = ( p_choose.contiguous().unsqueeze(1) .expand(-1, self.num_heads, -1, -1).contiguous() .view(-1, tgt_len, src_len) ) return p_choose
file_length: 21,349 | avg_line_length: 35.125212 | max_line_length: 119 | extension_type: py
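In the wait-k attention variant above, p_choose is a band matrix: target step i is allowed to read exactly source step i + k − 1, which the code builds by combining torch.tril and torch.triu with the same diagonal. A tiny sketch of that mask on toy sizes (PyTorch only; the real code applies it per head and handles left-padded sources):

```python
import torch

tgt_len, src_len, waitk_lagging = 4, 6, 3

p_choose = torch.ones(tgt_len, src_len)
p_choose = torch.tril(p_choose, diagonal=waitk_lagging - 1)
p_choose = torch.triu(p_choose, diagonal=waitk_lagging - 1)
print(p_choose)
# tensor([[0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 1., 0.],
#         [0., 0., 0., 0., 0., 1.]])
```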
RegularizedBN
RegularizedBN-main/examples/simultaneous_translation/eval/eval_latency.py
```python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from examples.simultaneous_translation.utils.latency import LatencyInference
import argparse
import torch
import json

LATENCY_METRICS = [
    'differentiable_average_lagging',
    'average_lagging',
    'average_proportion',
]


class LatencyScorer():
    def __init__(self, start_from_zero=True):
        self.recorder = []
        self.scores = {}
        self.scorer = LatencyInference()
        self.start_from_zero = start_from_zero

    def update_reorder(self, list_of_dict):
        self.recorder = []
        for info in list_of_dict:
            delays = [
                int(x) - int(not self.start_from_zero) for x in info["delays"]
            ]
            delays = torch.LongTensor(delays).unsqueeze(0)
            src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
            self.recorder.append(self.scorer(delays, src_len))

    def cal_latency(self):
        self.scores = {}
        for metric in LATENCY_METRICS:
            self.scores[metric] = sum(
                [x[metric][0, 0].item() for x in self.recorder]
            ) / len(self.recorder)
        return self.scores

    @classmethod
    def score(cls, list_of_dict, start_from_zero=True):
        scorer_to_return = cls(start_from_zero)
        scorer_to_return.update_reorder(list_of_dict)
        scorer_to_return.cal_latency()
        return scorer_to_return.scores


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True)
    parser.add_argument("--start-from-zero", action="store_true")
    args = parser.parse_args()

    scorer = LatencyInference()
    recorder = []
    with open(args.input, 'r') as f:
        for line in f:
            info = json.loads(line)
            delays = [int(x) - int(not args.start_from_zero) for x in info["delays"]]
            delays = torch.LongTensor(delays).unsqueeze(0)
            src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
            recorder.append(scorer(delays, src_len))

    average_results = {}
    for metric in LATENCY_METRICS:
        average_results[metric] = sum(
            [x[metric][0, 0].item() for x in recorder]
        ) / len(recorder)
        print(f"{metric}: {average_results[metric]}")
```
file_length: 2,432 | avg_line_length: 29.037037 | max_line_length: 85 | extension_type: py
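The script above averages three latency metrics over a JSONL file in which each line carries the per-target-token delays and the source length. A hedged sketch of that input format, with average proportion computed directly from its definition AP = (1 / |x||y|) Σ_i d_i; the values are made up:

```python
import json

# One JSON object per line, as expected by eval_latency.py (illustrative values).
line = '{"delays": [1, 2, 4, 4, 5], "src_len": 5}'
info = json.loads(line)

delays, src_len = info["delays"], info["src_len"]
tgt_len = len(delays)

# Average proportion computed by hand.
average_proportion = sum(delays) / (src_len * tgt_len)
print(average_proportion)  # 0.64
```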
RegularizedBN
RegularizedBN-main/examples/simultaneous_translation/models/transformer_monotonic_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, ) from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer ) DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('transformer_unidirectional') class TransformerUnidirectionalModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @register_model('transformer_monotonic') class TransformerMonotonicModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) def _indices_from_states(self, states): if type(states["indices"]["src"]) == list: if next(self.parameters()).is_cuda: tensor = torch.cuda.LongTensor else: tensor = torch.LongTensor src_indices = tensor( [states["indices"]["src"][: 1 + states["steps"]["src"]]] ) tgt_indices = tensor( [ [self.decoder.dictionary.eos()] + states["indices"]["tgt"] ] ) else: src_indices = states["indices"]["src"][: 1 + states["steps"]["src"]] tgt_indices = states["indices"]["tgt"] return src_indices, None, tgt_indices def predict_from_states(self, states): decoder_states = self.decoder.output_layer( states["decoder_features"] ) lprobs = self.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1) token = self.decoder.dictionary.string(index) return token, index[0, 0].item() def decision_from_states(self, states): ''' This funcion take states dictionary as input, and gives the agent a decision of whether read a token from server. 
Moreover, the decoder states are also calculated here so we can directly generate a target token without recompute every thing ''' self.eval() if len(states["tokens"]["src"]) == 0: return 0 src_indices, src_lengths, tgt_indices = self._indices_from_states( states) # Update encoder states if needed if ( "encoder_states" not in states or states["encoder_states"][0].size(1) <= states["steps"]["src"] ): encoder_out_dict = self.encoder(src_indices, src_lengths) states["encoder_states"] = encoder_out_dict else: encoder_out_dict = states["encoder_states"] # online means we still need tokens to feed the model states["model_states"]["online"] = not ( states["finish_read"] and len(states["tokens"]["src"]) == states["steps"]["src"] ) states["model_states"]["steps"] = states["steps"] x, outputs = self.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=encoder_out_dict, incremental_state=states["model_states"], features_only=True, ) states["decoder_features"] = x return outputs["action"] class TransformerMonotonicEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend([ TransformerMonotonicEncoderLayer(args) for i in range(args.encoder_layers) ]) class TransformerMonotonicDecoder(TransformerDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend([ TransformerMonotonicDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) def pre_attention( self, prev_output_tokens, encoder_out_dict, incremental_state=None ): positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_out = encoder_out_dict.encoder_out encoder_padding_mask = encoder_out_dict.encoder_padding_mask return x, encoder_out, encoder_padding_mask def post_attention(self, x): if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x def extract_features( self, prev_output_tokens, encoder_out, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # incremental_state = None ( x, encoder_outs, encoder_padding_mask ) = self.pre_attention( prev_output_tokens, encoder_out, incremental_state ) attn = None inner_states = [x] attn_list = [] step_list = [] for i, layer in enumerate(self.layers): x, attn, _ = layer( x=x, encoder_out=encoder_outs, encoder_padding_mask=encoder_padding_mask, incremental_state=incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) attn_list.append(attn) if incremental_state is not None: curr_steps = layer.get_steps(incremental_state) step_list.append(curr_steps) if incremental_state.get("online", False): p_choose = attn["p_choose"].squeeze(0).squeeze(1).gather(1, curr_steps.t()) new_steps = ( curr_steps + (p_choose < 0.5).t().type_as(curr_steps) ) if (new_steps >= incremental_state["steps"]["src"]).any(): # We need to prune the last self_attn saved_state # if model decide not to read # otherwise there will be duplicated saved_state for j in range(i + 1): self.layers[j].prune_incremental_state( incremental_state) return x, {"action": 0} if ( incremental_state is not None and not incremental_state.get("online", False) ): # Here is for fast evaluation fastest_step = torch.max( torch.cat(step_list, dim=1), dim=1, keepdim=True )[0] + 1 if "fastest_step" in incremental_state: incremental_state["fastest_step"] = torch.cat( [incremental_state["fastest_step"], fastest_step], dim=1 ) else: incremental_state["fastest_step"] = fastest_step x = self.post_attention(x) return x, { "action": 1, "attn_list": attn_list, "step_list": step_list, "encoder_out": encoder_out, "encoder_padding_mask": encoder_padding_mask, } def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) if "fastest_step" in incremental_state: incremental_state["fastest_step"] = ( incremental_state["fastest_step"] .index_select(0, new_order) ) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic' ) def base_monotonic_rchitecture(args): base_architecture(args) args.encoder_unidirectional = getattr( args, 'encoder_unidirectional', False) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_iwslt_de_en' ) def transformer_monotonic_iwslt_de_en(args): transformer_iwslt_de_en(args) base_monotonic_rchitecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_vaswani_wmt_en_de_big' ) def transformer_monotonic_vaswani_wmt_en_de_big(args): transformer_vaswani_wmt_en_de_big(args) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_vaswani_wmt_en_fr_big' ) def transformer_monotonic_vaswani_wmt_en_fr_big(args): transformer_monotonic_vaswani_wmt_en_fr_big(args) @register_model_architecture( 'transformer_unidirectional', 'transformer_unidirectional_iwslt_de_en' ) def transformer_unidirectional_iwslt_de_en(args): transformer_iwslt_de_en(args)
file_length: 11,249 | avg_line_length: 30.163435 | max_line_length: 95 | extension_type: py
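pre_attention above follows the usual fairseq decoder embedding path: scale the token embedding, add positions, apply dropout, then switch from batch-first to time-first layout before the attention layers. A self-contained sketch of just that tensor plumbing, with plain nn.Embedding stand-ins for the learned token and positional embeddings; the vocabulary size, dimensions, and dropout rate are illustrative:

```python
import math
import torch
import torch.nn as nn

vocab, embed_dim, max_pos, bsz, tgt_len = 100, 16, 32, 2, 5
embed_tokens = nn.Embedding(vocab, embed_dim)
embed_positions = nn.Embedding(max_pos, embed_dim)   # stand-in for fairseq's positional embedding
embed_scale = math.sqrt(embed_dim)
dropout = nn.Dropout(0.1)

prev_output_tokens = torch.randint(0, vocab, (bsz, tgt_len))
positions = embed_positions(torch.arange(tgt_len).unsqueeze(0).expand(bsz, -1))

x = embed_scale * embed_tokens(prev_output_tokens)
x = dropout(x + positions)
x = x.transpose(0, 1)          # B x T x C -> T x B x C, as in pre_attention
print(x.shape)                 # torch.Size([5, 2, 16])
```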
RegularizedBN
RegularizedBN-main/examples/simultaneous_translation/utils/functions.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10): """ Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] """ tensor_size = list(tensor.size()) tensor_size[dim] = 1 return_tensor = safe_cumprod( torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim), dim=dim, eps=eps ) if dim == 0: return return_tensor[:-1] elif dim == 1: return return_tensor[:, :-1] elif dim == 2: return return_tensor[:, :, :-1] else: raise RuntimeError("Cumprod on dimension 3 and more is not implemented") def safe_cumprod(tensor, dim: int, eps: float = 1e-10): """ An implementation of cumprod to prevent precision issue. cumprod(x) = [x1, x1x2, x1x2x3, ....] = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...] = exp(cumsum(log(x))) """ if (tensor + eps < 0).any().item(): raise RuntimeError( "Safe cumprod can only take non-negative tensors as input." "Consider use torch.cumprod if you want to calculate negative values." ) log_tensor = torch.log(tensor + eps) cumsum_log_tensor = torch.cumsum(log_tensor, dim) exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor) return exp_cumsum_log_tensor def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False): """ Convert a tensor of lengths to mask For example, lengths = [[2, 3, 4]], max_len = 5 mask = [[1, 1, 1], [1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 0]] """ assert len(lengths.size()) <= 2 if len(lengths) == 2: if dim == 1: lengths = lengths.t() lengths = lengths else: lengths = lengths.unsqueeze(1) # lengths : batch_size, 1 lengths = lengths.view(-1, 1) batch_size = lengths.size(0) # batch_size, max_len mask = torch.arange(max_len).expand(batch_size, max_len).type_as(lengths) < lengths if negative_mask: mask = ~mask if dim == 0: # max_len, batch_size mask = mask.t() return mask def moving_sum(x, start_idx: int, end_idx: int): """ From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] """ assert start_idx > 0 and end_idx > 0 assert len(x.size()) == 2 src_len, batch_size = x.size() # batch_size, 1, src_len x = x.t().unsqueeze(1) # batch_size, 1, src_len moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1]) moving_sum = torch.nn.functional.conv1d( x, moving_sum_weight, padding=start_idx + end_idx - 1 ).squeeze(1).t() moving_sum = moving_sum[end_idx: -start_idx] assert src_len == moving_sum.size(0) assert batch_size == moving_sum.size(1) return moving_sum
file_length: 3,908 | avg_line_length: 25.773973 | max_line_length: 95 | extension_type: py
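exclusive_cumprod above shifts a cumulative product right by one position, so index i holds the product of all entries before i (with 1 at the start), and safe_cumprod computes the product in log space for numerical stability. A quick numeric check of both identities (PyTorch only, toy values):

```python
import torch

x = torch.tensor([0.5, 0.4, 0.2])

# Inclusive cumprod: [x1, x1*x2, x1*x2*x3]
print(torch.cumprod(x, dim=0))            # tensor([0.5000, 0.2000, 0.0400])

# Exclusive cumprod: prepend 1 and drop the last inclusive term.
exclusive = torch.cumprod(torch.cat([torch.ones(1), x]), dim=0)[:-1]
print(exclusive)                          # tensor([1.0000, 0.5000, 0.2000])

# Log-space version used by safe_cumprod: exp(cumsum(log(x + eps))).
eps = 1e-10
print(torch.exp(torch.cumsum(torch.log(x + eps), dim=0)))  # ~ same as cumprod(x)
```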
RegularizedBN
RegularizedBN-main/examples/simultaneous_translation/utils/latency.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class LatencyMetric(object): @staticmethod def length_from_padding_mask(padding_mask, batch_first: bool = False): dim = 1 if batch_first else 0 return padding_mask.size(dim) - padding_mask.sum(dim=dim, keepdim=True) def prepare_latency_metric( self, delays, src_lens, target_padding_mask=None, batch_first: bool = False, start_from_zero: bool = True ): assert len(delays.size()) == 2 assert len(src_lens.size()) == 2 if start_from_zero: delays = delays + 1 if batch_first: # convert to batch_last delays = delays.t() src_lens = src_lens.t() tgt_len, bsz = delays.size() _, bsz_1 = src_lens.size() if target_padding_mask is not None: target_padding_mask = target_padding_mask.t() tgt_len_1, bsz_2 = target_padding_mask.size() assert tgt_len == tgt_len_1 assert bsz == bsz_2 assert bsz == bsz_1 if target_padding_mask is None: tgt_lens = tgt_len * delays.new_ones([1, bsz]).float() else: # 1, batch_size tgt_lens = self.length_from_padding_mask(target_padding_mask, False).float() delays = delays.masked_fill(target_padding_mask, 0) return delays, src_lens, tgt_lens, target_padding_mask def __call__( self, delays, src_lens, target_padding_mask=None, batch_first: bool = False, start_from_zero: bool = True, ): delays, src_lens, tgt_lens, target_padding_mask = self.prepare_latency_metric( delays, src_lens, target_padding_mask, batch_first, start_from_zero ) return self.cal_metric(delays, src_lens, tgt_lens, target_padding_mask) @staticmethod def cal_metric(delays, src_lens, tgt_lens, target_padding_mask): """ Expected sizes: delays: tgt_len, batch_size src_lens: 1, batch_size target_padding_mask: tgt_len, batch_size """ raise NotImplementedError class AverageProportion(LatencyMetric): """ Function to calculate Average Proportion from Can neural machine translation do simultaneous translation? (https://arxiv.org/abs/1606.02012) Delays are monotonic steps, range from 1 to src_len. Give src x tgt y, AP is calculated as: AP = 1 / (|x||y]) sum_i^|Y| deleys_i """ @staticmethod def cal_metric(delays, src_lens, tgt_lens, target_padding_mask): if target_padding_mask is not None: AP = torch.sum(delays.masked_fill(target_padding_mask, 0), dim=0, keepdim=True) else: AP = torch.sum(delays, dim=0, keepdim=True) AP = AP / (src_lens * tgt_lens) return AP class AverageLagging(LatencyMetric): """ Function to calculate Average Lagging from STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework (https://arxiv.org/abs/1810.08398) Delays are monotonic steps, range from 1 to src_len. 
Give src x tgt y, AP is calculated as: AL = 1 / tau sum_i^tau delays_i - (i - 1) / gamma Where gamma = |y| / |x| tau = argmin_i(delays_i = |x|) """ @staticmethod def cal_metric(delays, src_lens, tgt_lens, target_padding_mask): # tau = argmin_i(delays_i = |x|) tgt_len, bsz = delays.size() lagging_padding_mask = delays >= src_lens lagging_padding_mask = torch.nn.functional.pad(lagging_padding_mask.t(), (1, 0)).t()[:-1, :] gamma = tgt_lens / src_lens lagging = delays - torch.arange(delays.size(0)).unsqueeze(1).type_as(delays).expand_as(delays) / gamma lagging.masked_fill_(lagging_padding_mask, 0) tau = (1 - lagging_padding_mask.type_as(lagging)).sum(dim=0, keepdim=True) AL = lagging.sum(dim=0, keepdim=True) / tau return AL class DifferentiableAverageLagging(LatencyMetric): """ Function to calculate Differentiable Average Lagging from Monotonic Infinite Lookback Attention for Simultaneous Machine Translation (https://arxiv.org/abs/1906.05218) Delays are monotonic steps, range from 0 to src_len-1. (In the original paper thery are from 1 to src_len) Give src x tgt y, AP is calculated as: DAL = 1 / |Y| sum_i^|Y| delays'_i - (i - 1) / gamma Where delays'_i = 1. delays_i if i == 1 2. max(delays_i, delays'_{i-1} + 1 / gamma) """ @staticmethod def cal_metric(delays, src_lens, tgt_lens, target_padding_mask): tgt_len, bsz = delays.size() gamma = tgt_lens / src_lens new_delays = torch.zeros_like(delays) for i in range(delays.size(0)): if i == 0: new_delays[i] = delays[i] else: new_delays[i] = torch.cat( [ new_delays[i - 1].unsqueeze(0) + 1 / gamma, delays[i].unsqueeze(0) ], dim=0 ).max(dim=0)[0] DAL = ( new_delays - torch.arange(delays.size(0)).unsqueeze(1).type_as(delays).expand_as(delays) / gamma ) if target_padding_mask is not None: DAL = DAL.masked_fill(target_padding_mask, 0) DAL = DAL.sum(dim=0, keepdim=True) / tgt_lens return DAL class LatencyMetricVariance(LatencyMetric): def prepare_latency_metric( self, delays, src_lens, target_padding_mask=None, batch_first: bool = True, start_from_zero: bool = True ): assert batch_first assert len(delays.size()) == 3 assert len(src_lens.size()) == 2 if start_from_zero: delays = delays + 1 # convert to batch_last bsz, num_heads_x_layers, tgt_len = delays.size() bsz_1, _ = src_lens.size() assert bsz == bsz_1 if target_padding_mask is not None: bsz_2, tgt_len_1 = target_padding_mask.size() assert tgt_len == tgt_len_1 assert bsz == bsz_2 if target_padding_mask is None: tgt_lens = tgt_len * delays.new_ones([bsz, tgt_len]).float() else: # batch_size, 1 tgt_lens = self.length_from_padding_mask(target_padding_mask, True).float() delays = delays.masked_fill(target_padding_mask.unsqueeze(1), 0) return delays, src_lens, tgt_lens, target_padding_mask class VarianceDelay(LatencyMetricVariance): @staticmethod def cal_metric(delays, src_lens, tgt_lens, target_padding_mask): """ delays : bsz, num_heads_x_layers, tgt_len src_lens : bsz, 1 target_lens : bsz, 1 target_padding_mask: bsz, tgt_len or None """ if delays.size(1) == 1: return delays.new_zeros([1]) variance_delays = delays.var(dim=1) if target_padding_mask is not None: variance_delays.masked_fill_(target_padding_mask, 0) return variance_delays.sum(dim=1, keepdim=True) / tgt_lens class LatencyInference(object): def __init__(self, start_from_zero=True): self.metric_calculator = { "differentiable_average_lagging": DifferentiableAverageLagging(), "average_lagging": AverageLagging(), "average_proportion": AverageProportion(), } self.start_from_zero = start_from_zero def __call__(self, monotonic_step, src_lens): """ 
monotonic_step range from 0 to src_len. src_len means eos delays: bsz, tgt_len src_lens: bsz, 1 """ if not self.start_from_zero: monotonic_step -= 1 src_lens = src_lens delays = ( monotonic_step .view(monotonic_step.size(0), -1, monotonic_step.size(-1)) .max(dim=1)[0] ) delays = ( delays.masked_fill(delays >= src_lens, 0) + (src_lens - 1) .expand_as(delays) .masked_fill(delays < src_lens, 0) ) return_dict = {} for key, func in self.metric_calculator.items(): return_dict[key] = func( delays.float(), src_lens.float(), target_padding_mask=None, batch_first=True, start_from_zero=True ).t() return return_dict class LatencyTraining(object): def __init__( self, avg_weight, var_weight, avg_type, var_type, stay_on_last_token, average_method, ): self.avg_weight = avg_weight self.var_weight = var_weight self.avg_type = avg_type self.var_type = var_type self.stay_on_last_token = stay_on_last_token self.average_method = average_method self.metric_calculator = { "differentiable_average_lagging": DifferentiableAverageLagging(), "average_lagging": AverageLagging(), "average_proportion": AverageProportion(), } self.variance_calculator = { "variance_delay": VarianceDelay(), } def expected_delays_from_attention( self, attention, source_padding_mask=None, target_padding_mask=None ): if type(attention) == list: # bsz, num_heads, tgt_len, src_len bsz, num_heads, tgt_len, src_len = attention[0].size() attention = torch.cat(attention, dim=1) bsz, num_heads_x_layers, tgt_len, src_len = attention.size() # bsz * num_heads * num_layers, tgt_len, src_len attention = attention.view(-1, tgt_len, src_len) else: # bsz * num_heads * num_layers, tgt_len, src_len bsz, tgt_len, src_len = attention.size() num_heads_x_layers = 1 attention = attention.view(-1, tgt_len, src_len) if not self.stay_on_last_token: residual_attention = \ 1 - attention[:, :, :-1].sum(dim=2, keepdim=True) attention = torch.cat( [attention[:, :, :-1], residual_attention], dim=2 ) # bsz * num_heads_x_num_layers, tgt_len, src_len for MMA steps = ( torch .arange(1, 1 + src_len) .unsqueeze(0) .unsqueeze(1) .expand_as(attention) .type_as(attention) ) if source_padding_mask is not None: src_offset = ( source_padding_mask.type_as(attention) .sum(dim=1, keepdim=True) .expand(bsz, num_heads_x_layers) .contiguous() .view(-1, 1) ) src_lens = src_len - src_offset if source_padding_mask[:, 0].any(): # Pad left src_offset = src_offset.view(-1, 1, 1) steps = steps - src_offset steps = steps.masked_fill(steps <= 0, 0) else: src_lens = attention.new_ones([bsz, num_heads_x_layers]) * src_len src_lens = src_lens.view(-1, 1) # bsz * num_heads_num_layers, tgt_len, src_len expected_delays = (steps * attention).sum(dim=2).view( bsz, num_heads_x_layers, tgt_len ) if target_padding_mask is not None: expected_delays.masked_fill_( target_padding_mask.unsqueeze(1), 0 ) return expected_delays, src_lens def avg_loss(self, expected_delays, src_lens, target_padding_mask): bsz, num_heads_x_layers, tgt_len = expected_delays.size() target_padding_mask = ( target_padding_mask .unsqueeze(1) .expand_as(expected_delays) .contiguous() .view(-1, tgt_len) ) if self.average_method == "average": # bsz * tgt_len expected_delays = expected_delays.mean(dim=1) elif self.average_method == "weighted_average": weights = torch.nn.functional.softmax(expected_delays, dim=1) expected_delays = torch.sum(expected_delays * weights, dim=1) elif self.average_method == "max": # bsz * num_heads_x_num_layers, tgt_len expected_delays = expected_delays.max(dim=1)[0] else: raise RuntimeError(f"{self.average_method} is 
not supported") src_lens = src_lens.view(bsz, -1)[:, :1] target_padding_mask = target_padding_mask.view(bsz, -1, tgt_len)[:, 0] if self.avg_weight > 0.0: if self.avg_type in self.metric_calculator: average_delays = self.metric_calculator[self.avg_type]( expected_delays, src_lens, target_padding_mask, batch_first=True, start_from_zero=False ) else: raise RuntimeError(f"{self.avg_type} is not supported.") # bsz * num_heads_x_num_layers, 1 return self.avg_weight * average_delays.sum() else: return 0.0 def var_loss(self, expected_delays, src_lens, target_padding_mask): src_lens = src_lens.view(expected_delays.size(0), expected_delays.size(1))[:, :1] if self.var_weight > 0.0: if self.var_type in self.variance_calculator: variance_delays = self.variance_calculator[self.var_type]( expected_delays, src_lens, target_padding_mask, batch_first=True, start_from_zero=False ) else: raise RuntimeError(f"{self.var_type} is not supported.") return self.var_weight * variance_delays.sum() else: return 0.0 def loss(self, attention, source_padding_mask=None, target_padding_mask=None): expected_delays, src_lens = self.expected_delays_from_attention( attention, source_padding_mask, target_padding_mask ) latency_loss = 0 latency_loss += self.avg_loss(expected_delays, src_lens, target_padding_mask) latency_loss += self.var_loss(expected_delays, src_lens, target_padding_mask) return latency_loss
file_length: 14,598 | avg_line_length: 32.407323 | max_line_length: 110 | extension_type: py
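AverageLagging above implements AL = (1/τ) Σ_{i ≤ τ} (d_i − (i − 1)/γ) with γ = |y|/|x| and τ the first target step whose delay reaches the source length. A hedged hand computation on a toy monotonic schedule (plain Python; the delays are made up, and the tensorized masking in the class is omitted):

```python
# Toy example: source length 4, target length 4, delays d_i in [1, src_len].
src_len, delays = 4, [1, 2, 3, 4]
tgt_len = len(delays)
gamma = tgt_len / src_len            # 1.0

# tau: first 1-based index where the full source has been read.
tau = next(i + 1 for i, d in enumerate(delays) if d >= src_len)   # 4

AL = sum(delays[i] - i / gamma for i in range(tau)) / tau
print(AL)  # 1.0  (each target token lags exactly one source step behind)
```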
RegularizedBN
RegularizedBN-main/scripts/average_checkpoints.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import torch import os import re from fairseq.file_io import PathManager def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, 'rb') as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, 'cpu') ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state['model'] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( 'For checkpoint {}, expected list of params: {}, ' 'but found: {}'.format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state['model'] = averaged_params return new_state def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt') else: pt_regexp = re.compile(r'checkpoint(\d+)\.pt') files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception('Found {} checkpoint files but need at least {}', len(entries), n) #print(sorted(entries, reverse=True)[n:2*n]);exit() return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] def main(): parser = argparse.ArgumentParser( description='Tool to average the params of input checkpoints to ' 'produce a new checkpoint', ) # fmt: off parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, ' 'and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, ' 'and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' 'when using 
--num-update-checkpoints, this will set an upper bound on which update to use. ' 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged. ' 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 45500-50000 would be averaged assuming --save-interval-updates 500' ) # fmt: on args = parser.parse_args() print(args) num = None is_update_based = False if args.num_update_checkpoints is not None: num = args.num_update_checkpoints is_update_based = True elif args.num_epoch_checkpoints is not None: num = args.num_epoch_checkpoints assert args.checkpoint_upper_bound is None or (args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None), \ '--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints' assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \ 'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints' if num is not None: args.inputs = last_n_checkpoints( args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound, ) print('averaging checkpoints: ', args.inputs) new_state = average_checkpoints(args.inputs) with PathManager.open(args.output, 'wb') as f: torch.save(new_state, f) print('Finished writing averaged checkpoint to {}'.format(args.output)) if __name__ == '__main__': main()
6,012
38.821192
175
py
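A minimal sketch of driving average_checkpoints() programmatically, mirroring the unit test for this script further down in this dump. The toy parameter dicts, the 'w' key, and the temporary paths are illustrative assumptions; only the function and its checkpoint layout ({'model': state_dict}) come from the code above.

import collections
import os
import tempfile

import torch

# repo-local import, the same one the tests in this dump use
from scripts.average_checkpoints import average_checkpoints

# two toy "checkpoints" that only differ in a single parameter tensor
params_0 = collections.OrderedDict([('w', torch.FloatTensor([1.0, 2.0]))])
params_1 = collections.OrderedDict([('w', torch.FloatTensor([3.0, 4.0]))])

paths = []
for params in (params_0, params_1):
    fd, path = tempfile.mkstemp(suffix='.pt')
    os.close(fd)
    torch.save(collections.OrderedDict([('model', params)]), path)
    paths.append(path)

averaged = average_checkpoints(paths)['model']
print(averaged['w'])  # tensor([2., 3.]) -- element-wise mean of the two checkpoints

for path in paths:
    os.remove(path)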
RegularizedBN
RegularizedBN-main/tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import unittest from io import StringIO from unittest.mock import MagicMock, patch import torch from fairseq import data, checkpoint_utils def mock_trainer(epoch, num_updates, iterations_in_epoch): trainer = MagicMock() trainer.load_checkpoint.return_value = { 'train_iterator': { 'epoch': epoch, 'iterations_in_epoch': iterations_in_epoch, 'shuffle': False, }, } trainer.get_num_updates.return_value = num_updates return trainer def mock_dict(): d = MagicMock() d.pad.return_value = 1 d.eos.return_value = 2 d.unk.return_value = 3 return d def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch): tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1) tokens_ds = data.TokenBlockDataset( tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) trainer = mock_trainer(epoch, num_updates, iterations_in_epoch) dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False) epoch_itr = data.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=[[i] for i in range(epoch_size)], ) return trainer, epoch_itr def get_mock_args(finetune_from_model=None): args_mock = MagicMock() args_mock.optimizer_overrides = '{}' args_mock.reset_dataloader = False args_mock.reset_meters = False args_mock.reset_optimizer = False args_mock.reset_lr_scheduler = False args_mock.finetune_from_model = finetune_from_model args_mock.model_parallel_size = 1 return args_mock class TestLoadCheckpoint(unittest.TestCase): def setUp(self): self.args_mock = get_mock_args() self.patches = { 'os.makedirs': MagicMock(), 'os.path.join': MagicMock(), 'os.path.isfile': MagicMock(return_value=True), 'os.path.isabs': MagicMock(return_value=False), 'fairseq.file_io.PathManager.exists': MagicMock(return_value=False), } self.applied_patches = [patch(p, d) for p, d in self.patches.items()] [p.start() for p in self.applied_patches] logging.disable(logging.CRITICAL) def tearDown(self): patch.stopall() logging.disable(logging.NOTSET) def test_load_partial_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50) self.assertEqual(epoch_itr.iterations_in_epoch, 51) for _ in range(150 - 52): next(itr) self.assertEqual(epoch_itr.iterations_in_epoch, 149) self.assertTrue(itr.has_next()) next(itr) self.assertFalse(itr.has_next()) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertTrue(itr.has_next()) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) def test_load_full_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 
3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0) def test_load_no_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) self.patches['os.path.isfile'].return_value = False _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 1) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0) def test_finetune_from_model_args_conflict(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) for arg in ['reset_optimizer', 'reset_lr_scheduler', 'reset_meters', 'reset_dataloader']: with self.subTest(arg=arg): args_mock = get_mock_args("/temp/checkpoint_pretrained.pt") setattr(args_mock, arg, True) with self.assertRaises(Exception) as context: _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer) self.assertTrue( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" in str(context.exception) ) def test_finetune_from_model(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" args_mock = get_mock_args(from_model_path) args_mock.restore_file = "checkpoint_last.pt" def mock_finetune_exist(path): if path == from_model_path: return True else: return False self.patches['fairseq.file_io.PathManager.exists'].side_effect = mock_finetune_exist _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer) checkpoint_path, reset_optimizer, reset_lr_scheduler, \ optimizer_overrides = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]['reset_meters'] self.assertTrue(reset_optimizer) self.assertTrue(reset_lr_scheduler) self.assertTrue(reset_meters) def test_finetune_from_model_resume(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" args_mock = get_mock_args(from_model_path) args_mock.restore_file = "checkpoint_last.pt" # launch second time # both restore_file=checkpoint_last.pt and finetune_from_model are set def mock_finetune_exist(path): if path == from_model_path or path.endswith('checkpoint_last.pt'): return True else: return False self.patches['fairseq.file_io.PathManager.exists'].side_effect = mock_finetune_exist _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer) checkpoint_path, reset_optimizer, reset_lr_scheduler, \ optimizer_overrides = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]['reset_meters'] self.assertFalse(reset_optimizer) self.assertFalse(reset_lr_scheduler) self.assertFalse(reset_meters) if __name__ == '__main__': unittest.main()
8,428
40.318627
110
py
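A stripped-down sketch of the patch-dictionary idiom that setUp/tearDown above rely on: a dict of target-path -> mock, started in bulk and torn down with patch.stopall(). The specific target used here is chosen only for illustration.

import os
from unittest.mock import MagicMock, patch

patches = {
    'os.path.isfile': MagicMock(return_value=True),  # pretend every checkpoint file exists
}
applied = [patch(target, mock) for target, mock in patches.items()]
[p.start() for p in applied]

print(os.path.isfile('/no/such/checkpoint.pt'))  # True while the patch is active

patch.stopall()  # the same global teardown TestLoadCheckpoint uses
print(os.path.isfile('/no/such/checkpoint.pt'))  # back to the real check -> False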
RegularizedBN
RegularizedBN-main/tests/test_average_checkpoints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import os import tempfile import unittest import shutil import numpy as np import torch from torch import nn from scripts.average_checkpoints import average_checkpoints class ModelWithSharedParameter(nn.Module): def __init__(self): super(ModelWithSharedParameter, self).__init__() self.embedding = nn.Embedding(1000, 200) self.FC1 = nn.Linear(200, 200) self.FC2 = nn.Linear(200, 200) # tie weight in FC2 to FC1 self.FC2.weight = nn.Parameter(self.FC1.weight) self.FC2.bias = nn.Parameter(self.FC1.bias) self.relu = nn.ReLU() def forward(self, input): return self.FC2(self.relu(self.FC1(input))) + self.FC1(input) class TestAverageCheckpoints(unittest.TestCase): def test_average_checkpoints(self): params_0 = collections.OrderedDict( [ ('a', torch.DoubleTensor([100.0])), ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ('c', torch.IntTensor([7, 8, 9])), ] ) params_1 = collections.OrderedDict( [ ('a', torch.DoubleTensor([1.0])), ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ('c', torch.IntTensor([2, 2, 2])), ] ) params_avg = collections.OrderedDict( [ ('a', torch.DoubleTensor([50.5])), ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), # We expect truncation for integer division ('c', torch.IntTensor([4, 5, 5])), ] ) fd_0, path_0 = tempfile.mkstemp() fd_1, path_1 = tempfile.mkstemp() torch.save(collections.OrderedDict([('model', params_0)]), path_0) torch.save(collections.OrderedDict([('model', params_1)]), path_1) output = average_checkpoints([path_0, path_1])['model'] os.close(fd_0) os.remove(path_0) os.close(fd_1) os.remove(path_1) for (k_expected, v_expected), (k_out, v_out) in zip( params_avg.items(), output.items()): self.assertEqual( k_expected, k_out, 'Key mismatch - expected {} but found {}. ' '(Expected list of keys: {} vs actual list of keys: {})'.format( k_expected, k_out, params_avg.keys(), output.keys() ) ) np.testing.assert_allclose( v_expected.numpy(), v_out.numpy(), err_msg='Tensor value mismatch for key {}'.format(k_expected) ) def test_average_checkpoints_with_shared_parameters(self): def _construct_model_with_shared_parameters(path, value): m = ModelWithSharedParameter() nn.init.constant_(m.FC1.weight, value) torch.save( {'model': m.state_dict()}, path ) return m tmpdir = tempfile.mkdtemp() paths = [] path = os.path.join(tmpdir, "m1.pt") m1 = _construct_model_with_shared_parameters(path, 1.0) paths.append(path) path = os.path.join(tmpdir, "m2.pt") m2 = _construct_model_with_shared_parameters(path, 2.0) paths.append(path) path = os.path.join(tmpdir, "m3.pt") m3 = _construct_model_with_shared_parameters(path, 3.0) paths.append(path) new_model = average_checkpoints(paths) self.assertTrue( torch.equal( new_model['model']['embedding.weight'], (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0 ) ) self.assertTrue( torch.equal( new_model['model']['FC1.weight'], (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0 ) ) self.assertTrue( torch.equal( new_model['model']['FC2.weight'], (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0 ) ) shutil.rmtree(tmpdir) if __name__ == '__main__': unittest.main()
4,494
30.215278
80
py
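The "We expect truncation for integer division" comment above corresponds to the //= branch of average_checkpoints; a short check of the same arithmetic on the test's integer tensors:

import torch

summed = torch.IntTensor([7, 8, 9]) + torch.IntTensor([2, 2, 2])  # [9, 10, 11]
summed //= 2  # integer (floor) division, as in the non-floating-point branch
print(summed)  # tensor([4, 5, 5], dtype=torch.int32) -- matches params_avg['c']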
RegularizedBN
RegularizedBN-main/tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from io import StringIO import json import os import tempfile import unittest import torch from . import test_binaries class TestReproducibility(unittest.TestCase): def _test_reproducibility( self, name, extra_flags=None, delta=0.0001, resume_checkpoint='checkpoint1.pt', max_epoch=3, ): def get_last_log_stats_containing_string(log_records, search_string): for log_record in logs.records[::-1]: if search_string in log_record.msg: return json.loads(log_record.msg) if extra_flags is None: extra_flags = [] with tempfile.TemporaryDirectory(name) as data_dir: with self.assertLogs() as logs: test_binaries.create_dummy_data(data_dir) test_binaries.preprocess_translation_data(data_dir) # train epochs 1 and 2 together with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, 'fconv_iwslt_de_en', [ '--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch), ] + extra_flags, ) train_log = get_last_log_stats_containing_string(logs.records, 'train_loss') valid_log = get_last_log_stats_containing_string(logs.records, 'valid_loss') # train epoch 2, resuming from previous checkpoint 1 os.rename( os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, 'checkpoint_last.pt'), ) with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, 'fconv_iwslt_de_en', [ '--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch), ] + extra_flags, ) train_res_log = get_last_log_stats_containing_string(logs.records, 'train_loss') valid_res_log = get_last_log_stats_containing_string(logs.records, 'valid_loss') for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']: self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta) for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']: self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta) def test_reproducibility(self): self._test_reproducibility('test_reproducibility') @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU') def test_reproducibility_fp16(self): self._test_reproducibility('test_reproducibility_fp16', [ '--fp16', '--fp16-init-scale', '4096', ], delta=0.011) @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU') def test_reproducibility_memory_efficient_fp16(self): self._test_reproducibility('test_reproducibility_memory_efficient_fp16', [ '--memory-efficient-fp16', '--fp16-init-scale', '4096', ]) def test_mid_epoch_reproducibility(self): self._test_reproducibility( 'test_mid_epoch_reproducibility', ['--save-interval-updates', '3'], resume_checkpoint='checkpoint_1_3.pt', max_epoch=1, ) if __name__ == '__main__': unittest.main()
3,864
36.163462
97
py
RegularizedBN
RegularizedBN-main/tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import unittest import torch from fairseq.sequence_scorer import SequenceScorer import tests.utils as test_utils class TestSequenceScorer(unittest.TestCase): def test_sequence_scorer(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) eos = d.eos() w1 = 4 w2 = 5 # construct dataloader data = [ { 'source': torch.LongTensor([w1, w2, eos]), 'target': torch.LongTensor([w1, w2, w1, eos]), }, { 'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, w1, eos]), }, { 'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, eos]), }, ] data_itr = test_utils.dummy_dataloader(data) # specify expected output probabilities args = argparse.Namespace() unk = 0. args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 [0.0, unk, 0.6, 0.4], # sentence 1 [0.0, unk, 0.4, 0.6], # sentence 2 [0.0, unk, 0.7, 0.3], # sentence 3 ]), # step 1: torch.FloatTensor([ # eos w1 w2 [0.0, unk, 0.2, 0.7], # sentence 1 [0.0, unk, 0.8, 0.2], # sentence 2 [0.7, unk, 0.1, 0.2], # sentence 3 ]), # step 2: torch.FloatTensor([ # eos w1 w2 [0.10, unk, 0.50, 0.4], # sentence 1 [0.15, unk, 0.15, 0.7], # sentence 2 [0.00, unk, 0.00, 0.0], # sentence 3 ]), # step 3: torch.FloatTensor([ # eos w1 w2 [0.9, unk, 0.05, 0.05], # sentence 1 [0.0, unk, 0.00, 0.0], # sentence 2 [0.0, unk, 0.00, 0.0], # sentence 3 ]), ] expected_scores = [ [0.6, 0.7, 0.5, 0.9], # sentence 1 [0.6, 0.8, 0.15], # sentence 2 [0.3, 0.7], # sentence 3 ] task = test_utils.TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) scorer = SequenceScorer(task.target_dictionary) for sample in data_itr: hypos = task.inference_step(scorer, [model], sample) for id, hypos_id in zip(sample['id'].tolist(), hypos): self.assertHypoTokens(hypos_id[0], data[id]['target']) self.assertHypoScore(hypos_id[0], expected_scores[id]) def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo['positional_scores'], pos_scores) self.assertEqual(pos_scores.numel(), hypo['tokens'].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel()**lenpen self.assertLess(abs(score - hypo['score']), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == '__main__': unittest.main()
3,949
33.051724
75
py
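assertHypoScore above spells out what SequenceScorer is expected to return: the sum of per-token log-probabilities, divided by length**lenpen when normalized. A short recomputation of that number for sentence 1's expected positional probabilities (an illustration of the check itself, not of SequenceScorer internals):

import torch

pos_probs = torch.FloatTensor([0.6, 0.7, 0.5, 0.9])  # expected_scores[0] above
pos_scores = pos_probs.log()
lenpen = 1.0

score = pos_scores.sum() / pos_scores.numel() ** lenpen  # length-normalized log-prob
print(round(score.item(), 4))  # about -0.4165, the value hypo['score'] is checked against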
RegularizedBN
RegularizedBN-main/tests/test_memory_efficient_fp16.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import unittest import torch from fairseq.optim.adam import FairseqAdam from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU') class TestMemoryEfficientFP16(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_load_state_dict(self): # define simple FP16 model model = torch.nn.Linear(5, 5).cuda().half() params = list(model.parameters()) # initialize memory efficient FP16 optimizer optimizer = FairseqAdam( argparse.Namespace( lr=[0.00001], adam_betas='(0.9, 0.999)', adam_eps=1e-8, weight_decay=0.0, ), params, ) me_optimizer = MemoryEfficientFP16Optimizer( argparse.Namespace( fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4, ), params, optimizer, ) # optimizer state is created in the first step loss = model(torch.rand(5).cuda().half()).sum() me_optimizer.backward(loss) me_optimizer.step() # reload state state = me_optimizer.state_dict() me_optimizer.load_state_dict(state) for k, v in me_optimizer.optimizer.state.items(): self.assertTrue(k.dtype == torch.float16) for v_i in v.values(): if torch.is_tensor(v_i): self.assertTrue(v_i.dtype == torch.float32) if __name__ == '__main__': unittest.main()
2,002
28.028986
70
py
RegularizedBN
RegularizedBN-main/tests/test_lstm_jitable.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.lstm import LSTMModel from fairseq.tasks.fairseq_task import FairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(FairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitLSTMModel(unittest.TestCase): def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) def assertTensorEqual(self, t1, t2): t1 = t1[~torch.isnan(t1)] # can cause size mismatch errors if there are NaNs t2 = t2[~torch.isnan(t2)] self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def test_jit_and_export_lstm(self): task, parser = get_dummy_task_and_parser() LSTMModel.add_args(parser) args = parser.parse_args([]) args.criterion = "" model = LSTMModel.build_model(args, task) scripted_model = torch.jit.script(model) self._test_save_and_load(scripted_model) def test_assert_jit_vs_nonjit_(self): task, parser = get_dummy_task_and_parser() LSTMModel.add_args(parser) args = parser.parse_args([]) args.criterion = "" model = LSTMModel.build_model(args, task) model.eval() scripted_model = torch.jit.script(model) scripted_model.eval() idx = len(task.source_dictionary) iter = 100 # Inject random input and check output seq_len_tensor = torch.randint(1, 10, (iter, )) num_samples_tensor = torch.randint(1, 10, (iter, )) for i in range(iter): seq_len = seq_len_tensor[i] num_samples = num_samples_tensor[i] src_token = torch.randint(0, idx, (num_samples, seq_len)), src_lengths = torch.randint(1, seq_len+1, (num_samples,)) src_lengths, _ = torch.sort(src_lengths, descending=True) # Force the first sample to have seq_len src_lengths[0] = seq_len prev_output_token = torch.randint(0, idx, (num_samples, 1)), result = model(src_token[0], src_lengths, prev_output_token[0], None) scripted_result = scripted_model(src_token[0], src_lengths, prev_output_token[0], None) self.assertTensorEqual(result[0], scripted_result[0]) self.assertTensorEqual(result[1], scripted_result[1]) if __name__ == "__main__": unittest.main()
3,995
34.052632
99
py
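_test_save_and_load above is a plain TorchScript round trip; the same pattern on a self-contained toy module (TinyModel is made up for the example, standing in for the scripted LSTMModel):

import tempfile

import torch
from torch import nn


class TinyModel(nn.Module):  # illustrative stand-in for the scripted LSTMModel
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)


scripted = torch.jit.script(TinyModel())
with tempfile.NamedTemporaryFile(suffix='.pt') as f:
    scripted.save(f.name)              # same call the test exercises
    reloaded = torch.jit.load(f.name)

x = torch.randn(3, 4)
assert torch.allclose(scripted(x), reloaded(x))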
RegularizedBN
RegularizedBN-main/tests/test_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import unittest from fairseq.modules.multihead_attention import MultiheadAttention class TestMultiheadAttention(unittest.TestCase): def test_append_prev_key_padding_mask(self): bsz = 1 src_len = 4 cases = [ # no padding mask (None, None, None), # current padding mask only ( torch.tensor([[1]]).bool(), None, torch.tensor([[0, 0, 0, 1]]).bool(), ), # previous padding mask only ( None, torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 0]]).bool(), ), # both padding masks ( torch.tensor([[1]]).bool(), torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), ] for c in cases: key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( c[0], c[1], batch_size=bsz, src_len=src_len, static_kv=False, ) if key_padding_mask is not None: self.assertTrue( torch.all(torch.eq(key_padding_mask, c[2])), f'Unexpected resultant key padding mask: {key_padding_mask}' f' given current: {c[0]} and previous: {c[1]}', ) self.assertEqual(key_padding_mask.size(0), bsz) self.assertEqual(key_padding_mask.size(1), src_len) else: self.assertIsNone(c[2]) if __name__ == '__main__': unittest.main()
1,904
30.229508
80
py
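The case table above encodes the intended semantics of _append_prev_key_padding_mask: a missing mask is treated as "no padding" (zeros) and the two masks are concatenated along the time axis. A sketch of that expectation with plain tensor ops (this reproduces what the test asserts, not fairseq's implementation):

import torch

prev_mask = torch.tensor([[0, 1, 0]]).bool()  # three previously cached positions
curr_mask = torch.tensor([[1]]).bool()        # one new position, which is padding

both = torch.cat([prev_mask, curr_mask], dim=1)
print(both)  # [[False, True, False, True]] -> the expected [[0, 1, 0, 1]]

# "current padding mask only": absent previous positions count as not padded
curr_only = torch.cat([torch.zeros(1, 3, dtype=torch.bool), curr_mask], dim=1)
print(curr_only)  # [[False, False, False, True]] -> the expected [[0, 0, 0, 1]]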
RegularizedBN
RegularizedBN-main/tests/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import random import sys import torch import torch.nn.functional as F from io import StringIO from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import FairseqTask from fairseq_cli import ( generate, interactive, preprocess, train, validate, ) def dummy_dictionary(vocab_size, prefix='token_'): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if 'id' not in sample: sample['id'] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0. args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ]), # step 1: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ]), # step 2: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [0.6, unk, 0.2, 0.2], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [0.60, unk, 0.4, 0.00], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ]), # step 3: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [1.0, unk, 0.0, 0.0], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [0.1, unk, 0.5, 0.4], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [1.0, unk, 0.0, 0.0], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ]), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False): def _create_dummy_data(filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(data_dir, filename), 'w') as h: offset = 0 for _ in range(num_examples): ex_len = random.randint(1, maxlen) ex_str = ' '.join(map(chr, data[offset:offset+ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, filename_tgt, filename): with 
open(os.path.join(data_dir, filename_src), 'r') as src_f, \ open(os.path.join(data_dir, filename_tgt), 'r') as tgt_f, \ open(os.path.join(data_dir, filename), 'w') as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = ' '.join(["{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices)]) print(ex_str, file=h) _create_dummy_data('train.in') _create_dummy_data('train.out') _create_dummy_data('valid.in') _create_dummy_data('valid.out') _create_dummy_data('test.in') _create_dummy_data('test.out') if alignment: _create_dummy_alignment_data('train.in', 'train.out', 'train.align') _create_dummy_alignment_data('valid.in', 'valid.out', 'valid.align') _create_dummy_alignment_data('test.in', 'test.out', 'test.align') def preprocess_lm_data(data_dir): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args([ '--only-source', '--trainpref', os.path.join(data_dir, 'train.out'), '--validpref', os.path.join(data_dir, 'valid.out'), '--testpref', os.path.join(data_dir, 'test.out'), '--destdir', data_dir, ]) preprocess.main(preprocess_args) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ '--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False, lang_flags=None, extra_valid_flags=None): if lang_flags is None: lang_flags = [ '--source-lang', 'in', '--target-lang', 'out', ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ '--task', task, data_dir, '--save-dir', data_dir, '--arch', arch, '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--num-workers', 0, ] + lang_flags + (extra_flags or []), ) train.main(train_args) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ '--task', task, data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--valid-subset', 'valid', '--max-tokens', '500', '--no-progress-bar', ] + lang_flags + (extra_valid_flags or []) ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None): if extra_flags is None: extra_flags = [ '--print-alignment', ] generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar', ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = '-' generate_args.max_sentences = None orig_stdin = sys.stdin sys.stdin = StringIO('h e l l o\n') interactive.main(generate_args) 
sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(FairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, 'beam_probs') or hasattr(args, 'probs') args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, 'step') if step is None: step = 0 utils.set_incremental_state(self, incremental_state, 'step', step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, 'probs'): assert self.args.probs.dim() == 3, \ 'expected probs to have size bsz*steps*vocab' probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class TestReshapingEncoder(FairseqEncoder): def __init__(self, args, 
dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert 'fancy_other_input' in kwargs assert kwargs['fancy_other_input'] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs) return decoder_out
16,382
32.09697
107
py
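create_dummy_data above fakes a corpus by drawing codepoints in [97, 122] (lowercase a-z) and writing them as space-separated single-character tokens; a sketch of one generated line under the same scheme:

import random

import torch

maxlen = 20
data = 97 + torch.floor(26 * torch.rand(maxlen)).int()  # codepoints for 'a'..'z'
ex_len = random.randint(1, maxlen)
print(' '.join(map(chr, data[:ex_len])))  # e.g. "k r a z q"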
RegularizedBN
RegularizedBN-main/tests/test_binaries.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from io import StringIO import logging import os import random import tempfile import unittest import torch from fairseq import options from fairseq_cli import train from fairseq_cli import eval_lm from fairseq_cli import validate from tests.utils import ( create_dummy_data, preprocess_lm_data, preprocess_translation_data, train_translation_model, generate_main, ) class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en') generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--dataset-impl', 'raw']) train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw']) generate_main(data_dir, ['--dataset-impl', 'raw']) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_update_freq') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3']) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_max_positions') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'], ) self.assertTrue( 'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception) ) train_translation_model( data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test']) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_sampling') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en') generate_main(data_dir, [ '--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--diversity-rate', '0.5', '--beam', '6', ]) with self.assertRaises(ValueError): generate_main(data_dir, [ '--diverse-beam-groups', '4', '--match-source-len', ]) generate_main(data_dir, ['--prefix-size', '2']) generate_main(data_dir, ['--retain-dropout']) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en', [ '--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', 
'--eval-bleu-args', '{"beam": 4, "min_len": 10}', ]) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', ]) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lstm', [ '--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2', ]) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ], run_validation=True) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']] decoder_langtok_flags = [[], ['--decoder-langtok']] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=[ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ] + enc_ltok_flag + dec_ltok_flag, lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ '--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out', ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']] decoder_langtok_flags = [[], ['--decoder-langtok']] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory(f'test_translation_multi_simple_epoch_{i}_{j}') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=[ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000', ] + enc_ltok_flag + dec_ltok_flag, 
lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ '--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out', ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention', ], run_validation=True) generate_main(data_dir, extra_flags=[]) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lightconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lightconv_iwslt_de_en', [ '--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lightconv_iwslt_de_en', [ '--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'cmlm_transformer', [ '--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'nonautoregressive_transformer', [ '--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step', ]) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # generate_main(data_dir, [ # 
'--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', [ '--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'insertion_transformer', [ '--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_moe') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir, [ '--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0' ]) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_alignment') as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ['--align-suffix', 'align']) train_translation_model( data_dir, 'transformer_align', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment' ], run_validation=True, ) generate_main(data_dir) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ '--encoder-layers', '[(128, 3)] * 2', '--decoder-layers', '[(128, 3)] * 2', '--decoder-attention', 'True', '--encoder-attention', 'False', '--gated-attention', 'True', '--self-attention', 'True', '--project-input', 'True', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--multihead-self-attention-nheads', '2' ] train_translation_model(data_dir, 'fconv_self_att_wp', config) generate_main(data_dir) # fusion model os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), 
os.path.join(data_dir, 'pretrained.pt')) config.extend([ '--pretrained', 'True', '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'), '--save-dir', os.path.join(data_dir, 'fusion_model'), ]) train_translation_model(data_dir, 'fconv_self_att_wp', config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model(data_dir, 'fconv_lm', [ '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]', '--decoder-embed-dim', '280', '--optimizer', 'nag', '--lr', '0.1', ]) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_lm_residuals') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lstm_lm', ['--add-bos-token', '--residuals'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm(data_dir, "roberta_base") def test_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) 
preprocess_lm_data(os.path.join(data_dir, 'input0')) preprocess_lm_data(os.path.join(data_dir, 'label')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_regression_single") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True) preprocess_lm_data(os.path.join(data_dir, 'input0')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target']) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_regression_multiple") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True) preprocess_lm_data(os.path.join(data_dir, 'input0')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target']) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=('--encoder-learned-pos',) if learned_pos_emb else () ) with tempfile.TemporaryDirectory( "test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (['--init-encoder-only'] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_optimizers') as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta'] last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt') for optimizer in optimizers: if os.path.exists(last_checkpoint): os.remove(last_checkpoint) train_translation_model(data_dir, 'lstm', [ '--required-batch-size-multiple', '1', '--encoder-layers', '1', '--encoder-hidden-size', '32', '--decoder-layers', '1', '--optimizer', optimizer, ]) generate_main(data_dir) def create_dummy_roberta_head_data(data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False): input_dir = 'input0' def _create_dummy_data(filename): random_data = torch.rand(num_examples * maxlen) input_data = 97 + torch.floor(26 * random_data).int() if regression: output_data = torch.rand((num_examples, num_classes)) else: output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() with open(os.path.join(data_dir, input_dir, filename+'.out'), 'w') as f_in: label_filename = filename+'.label' if regression else filename+'.out' with open(os.path.join(data_dir, 'label', label_filename), 'w') as f_out: offset = 0 for i in range(num_examples): # write example input ex_len = random.randint(1, maxlen) ex_str = ' '.join(map(chr, input_data[offset:offset+ex_len])) print(ex_str, file=f_in) # write example label if regression: class_str = ' '.join(map(str, output_data[i].numpy())) print(class_str, file=f_out) else: class_str = 'class{}'.format(output_data[i]) print(class_str, file=f_out) offset += ex_len os.mkdir(os.path.join(data_dir, input_dir)) os.mkdir(os.path.join(data_dir, 'label')) _create_dummy_data('train') _create_dummy_data('valid') _create_dummy_data('test') def train_masked_lm(data_dir, arch, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ '--task', 'masked_lm', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'masked_lm', '--max-sentences', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', 0, ] + (extra_flags or []), ) train.main(train_args) def train_roberta_head(data_dir, arch, num_classes=2, 
extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ '--task', 'sentence_prediction', data_dir, '--arch', arch, '--num-classes', str(num_classes), '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'sentence_prediction', '--max-tokens', '500', '--max-positions', '500', '--max-sentences', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', 0, ] + (extra_flags or []), ) train.main(train_args) def train_language_model(data_dir, arch, extra_flags=None, run_validation=False): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ '--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', ] + (extra_flags or []), ) train.main(train_args) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ '--task', 'language_modeling', data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--valid-subset', 'valid', '--max-tokens', '500', '--no-progress-bar', ] ) validate.main(validate_args) def eval_lm_main(data_dir): eval_lm_parser = options.get_eval_lm_parser() eval_lm_args = options.parse_args_and_arch( eval_lm_parser, [ data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--no-progress-bar', ], ) eval_lm.main(eval_lm_args) def train_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", ] + list(extra_args), ) train.main(train_args) if __name__ == '__main__': unittest.main()
40,028
40.959119
122
py
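The helpers in the file above (create_dummy_data, preprocess_lm_data, train_language_model, eval_lm_main, generate_main) all follow the same dummy-data-then-train pattern. Below is a minimal sketch of how they compose into one end-to-end language-model check; the function name is hypothetical, everything it calls is reused from that test file.

def run_tiny_lm_experiment(arch='transformer_lm'):
    # Hypothetical helper, not part of the original suite: it chains the same
    # steps the LM tests above run (dummy data -> binarize -> train -> eval_lm
    # -> generate) for a given language-model architecture.
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_tiny_lm') as data_dir:
            create_dummy_data(data_dir)
            preprocess_lm_data(data_dir)
            train_language_model(
                data_dir, arch, ['--add-bos-token'], run_validation=True,
            )
            eval_lm_main(data_dir)
            generate_main(data_dir, [
                '--task', 'language_modeling',
                '--sample-break-mode', 'eos',
                '--tokens-per-sample', '500',
            ])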
RegularizedBN
RegularizedBN-main/tests/test_concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.concat_dataset import ConcatDataset from tests.test_train import mock_dict class TestConcatDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def test_concat_dataset_basics(self): d = ConcatDataset( [self.dataset_1, self.dataset_2] ) assert(len(d) == 2) assert(d[0]['source'][0] == 1) assert(d[1]['source'][0] == 2) d = ConcatDataset( [self.dataset_1, self.dataset_2], sample_ratios=[1, 2] ) assert(len(d) == 3) assert(d[0]['source'][0] == 1) assert(d[1]['source'][0] == 2) assert(d[2]['source'][0] == 2) d = ConcatDataset( [self.dataset_1, self.dataset_2], sample_ratios=[2, 1] ) assert(len(d) == 3) assert(d[0]['source'][0] == 1) assert(d[1]['source'][0] == 1) assert(d[2]['source'][0] == 2)
1,943
28.907692
66
py
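A minimal sketch of the upsampling behaviour the assertions in test_concat_dataset.py check: with sample_ratios=[1, 2], ConcatDataset repeats the second dataset twice. It assumes the same imports as that test (torch, ConcatDataset, LanguagePairDataset, TokenBlockDataset, mock_dict).

def build_single_token_pair_dataset(token_id, d):
    # Wrap a one-token corpus exactly the way setUp() does above.
    tokens = torch.LongTensor([token_id]).view(1, -1)
    block = TokenBlockDataset(
        tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1,
        include_targets=False,
    )
    return LanguagePairDataset(block, block.sizes, d, shuffle=False)

d = mock_dict()
concat = ConcatDataset(
    [build_single_token_pair_dataset(1, d), build_single_token_pair_dataset(2, d)],
    sample_ratios=[1, 2],
)
assert len(concat) == 3  # one copy of the first dataset, two copies of the second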
RegularizedBN
RegularizedBN-main/tests/test_noising.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Dict, List import tests.utils as test_utils import torch from fairseq import utils from fairseq.data import ( Dictionary, LanguagePairDataset, TransformEosDataset, data_utils, noising, ) class TestDataNoising(unittest.TestCase): def _get_test_data_with_bpe_cont_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with continuation markers as suffixes to denote non-end of word tokens. This is the standard BPE format used in fairseq's preprocessing. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he@@") vocab.add_symbol("llo") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("y@@") vocab.add_symbol("ou") vocab.add_symbol("n@@") vocab.add_symbol("ew") vocab.add_symbol("or@@") vocab.add_symbol("k") src_tokens = [ ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"], ["how", "are", "y@@", "ou"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_bpe_end_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with end-of-word markers as suffixes to denote tokens at the end of a word. This is an alternative to fairseq's standard preprocessing framework and is not generally supported within fairseq. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he") vocab.add_symbol("llo_EOW") vocab.add_symbol("how_EOW") vocab.add_symbol("are_EOW") vocab.add_symbol("y") vocab.add_symbol("ou_EOW") vocab.add_symbol("n") vocab.add_symbol("ew_EOW") vocab.add_symbol("or") vocab.add_symbol("k_EOW") src_tokens = [ ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"], ["how_EOW", "are_EOW", "y", "ou_EOW"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_word_vocab(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: word vocab x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. 
""" vocab = Dictionary() vocab.add_symbol("hello") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("you") vocab.add_symbol("new") vocab.add_symbol("york") src_tokens = [ ["hello", "new", "york", "you"], ["how", "are", "you", "new", "york"], ] x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _convert_src_tokens_to_tensor( self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool ): src_len = [len(x) for x in src_tokens] # If we have to append EOS, we include EOS in counting src length if append_eos: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if append_eos: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) return x, torch.LongTensor(src_len) def assert_eos_at_end(self, x, x_len, eos): """Asserts last token of every sentence in x is EOS """ for i in range(len(x_len)): self.assertEqual( x[x_len[i] - 1][i], eos, ( "Expected eos (token id {eos}) at the end of sentence {i} " "but got {other} instead" ).format(i=i, eos=eos, other=x[i][-1]), ) def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised): # Expect only the first word (2 bpe tokens) of the first example # was dropped out self.assertEqual(x_len[0] - 2, l_noised[0]) for i in range(l_noised[0]): self.assertEqual(x_noised[i][0], x[i + 2][0]) def test_word_dropout_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk): # Expect only the first word (2 bpe tokens) of the first example # was blanked out self.assertEqual(x_len[0], l_noised[0]) for i in range(l_noised[0]): if i < 2: self.assertEqual(x_noised[i][0], unk) else: self.assertEqual(x_noised[i][0], x[i][0]) def test_word_blank_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def generate_unchanged_shuffle_map(self, length): return {i: i for i in range(length)} def assert_word_shuffle_matches_expected( self, x, x_len, max_shuffle_distance: int, vocab: Dictionary, expected_shufle_maps: List[Dict[int, int]], expect_eos_at_end: bool, bpe_end_marker=None, ): """ This verifies that with a given x, x_len, max_shuffle_distance, and vocab, we get the expected shuffle result. Args: x: Tensor of shape (T x B) = (sequence_length, batch_size) x_len: Tensor of length B = batch_size max_shuffle_distance: arg to pass to noising expected_shuffle_maps: List[mapping] where mapping is a Dict[old_index, new_index], mapping x's elements from their old positions in x to their new positions in x. expect_eos_at_end: if True, check the output to make sure there is an EOS at the end. bpe_end_marker: str denoting the BPE end token. 
If this is not None, we set the BPE cont token to None in the noising classes. """ bpe_cont_marker = None if bpe_end_marker is None: bpe_cont_marker = "@@" with data_utils.numpy_seed(1234): word_shuffle = noising.WordShuffle( vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker ) x_noised, l_noised = word_shuffle.noising( x, x_len, max_shuffle_distance=max_shuffle_distance ) # For every example, we have a different expected shuffle map. We check # that each example is shuffled as expected according to each # corresponding shuffle map. for i in range(len(expected_shufle_maps)): shuffle_map = expected_shufle_maps[i] for k, v in shuffle_map.items(): self.assertEqual(x[k][i], x_noised[v][i]) # Shuffling should not affect the length of each example for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised): self.assertEqual(pre_shuffle_length, post_shuffle_length) if expect_eos_at_end: self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_shuffle_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=True, ) def test_word_shuffle_with_eos_nonbpe(self): """The purpose of this is to test shuffling logic with word vocabs""" vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ {0: 0, 1: 1, 2: 3, 3: 2}, {0: 0, 1: 2, 2: 1, 3: 3, 4: 4}, ], expect_eos_at_end=True, ) def test_word_shuffle_without_eos(self): """Same result as word shuffle with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, ) def test_word_shuffle_without_eos_with_bpe_end_marker(self): """Same result as word shuffle without eos except using BPE end token""" vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False) # Assert word shuffle with max shuffle 
distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) def assert_no_eos_at_end(self, x, x_len, eos): """Asserts that the last token of each sentence in x is not EOS """ for i in range(len(x_len)): self.assertNotEqual( x[x_len[i] - 1][i], eos, "Expected no eos (token id {eos}) at the end of sentence {i}.".format( eos=eos, i=i ), ) def test_word_dropout_without_eos(self): """Same result as word dropout with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_blank_without_eos(self): """Same result as word blank with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def _get_noising_dataset_batch( self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False, ): """ Constructs a NoisingDataset and the corresponding ``LanguagePairDataset(NoisingDataset(src), src)``. If *append_eos_to_tgt* is True, wrap the source dataset in :class:`TransformEosDataset` to append EOS to the clean source when using it as the target. 
""" src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset( src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising, ) tgt = src_dataset language_pair_dataset = LanguagePairDataset( src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict ) language_pair_dataset = TransformEosDataset( language_pair_dataset, src_dict.eos(), append_eos_to_tgt=append_eos_to_tgt, ) dataloader = torch.utils.data.DataLoader( dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater, ) denoising_batch_result = next(iter(dataloader)) return denoising_batch_result def test_noising_dataset_with_eos(self): src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=True ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set *append_eos_to_tgt* to ``True``. """ src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=False ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict, append_eos_to_tgt=True, ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
19,779
36.533207
87
py
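A minimal sketch of the noising API exercised in test_noising.py, outside the unittest harness. The inputs are time-major (T x B) tensors, as in those tests, and the imports are the ones at the top of that file (torch, Dictionary, data_utils, noising).

vocab = Dictionary()
for sym in ["he@@", "llo", "how"]:
    vocab.add_symbol(sym)
# One sentence "he@@ llo how" plus EOS, shaped (T, B) = (4, 1).
x = torch.LongTensor([
    [vocab.index("he@@")], [vocab.index("llo")],
    [vocab.index("how")], [vocab.eos()],
])
x_len = torch.LongTensor([4])
with data_utils.numpy_seed(1234):
    dropout = noising.WordDropout(vocab)
    x_dropped, len_dropped = dropout.noising(x, x_len, 0.2)  # drop whole BPE words with prob 0.2
    shuffle = noising.WordShuffle(vocab, bpe_cont_marker="@@")
    x_shuffled, len_shuffled = shuffle.noising(x, x_len, max_shuffle_distance=3)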
RegularizedBN
RegularizedBN-main/tests/test_constraints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import torch import unittest from fairseq.token_generation_constraints import * def tensorize(constraints: List[List[int]]) -> torch.Tensor: return [torch.tensor(x) for x in constraints] class TestHelperRoutines(unittest.TestCase): def setUp(self): self.examples = [ ( [[]], torch.tensor([[0]]) ), ( [[], []], torch.tensor([[0], [0]]) ), ( [[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]]) ), ( [[torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])], [], [ torch.tensor([1, 8, 9, 10, 1, 4, 11, 12]) ]], torch.tensor([[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0]]) ) ] def test_packing(self): """Ensures the list of lists of tensors gets packed correctly.""" for batch_constraints, expected_tensor in self.examples: packed = pack_constraints(batch_constraints) assert torch.equal(packed, expected_tensor) class TestUnorderedConstraintState(unittest.TestCase): def setUp(self): # Tuples of (contraint set, expected printed graph, token counts per node) self.examples = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", { 1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1 } ), ( [], "[None].False#0", {} ), ( tensorize([[0]]), "([None].False#1 [0].True#1)", { 0: 1 } ), ( tensorize([[100000, 1, 2, 3, 4, 5]]), "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", { 100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1 } ), ( tensorize([[1, 2], [1, 2]]), "([None].False#2 ([1].False#2 [2].True#2))", { 1: 2, 2: 2 }, ), ( tensorize([[1, 2], [3, 4]]), "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", { 1: 1, 2: 1, 3: 1, 4: 1}, ), ] self.sequences = [ ( self.examples[0][0], [], { "bank": 0, "num_completed": 0, "finished": False, "is_root": True }, ), ( self.examples[0][0], [1, 2], { "bank": 2, "num_completed": 0, "finished": False, "is_root": False }, ), ( self.examples[0][0], [1, 2, 94], { "bank": 1, "num_completed": 1, "finished": False, "is_root": True }, ), ( self.examples[0][0], [1, 3, 999, 1, 4], { "bank": 4, "num_completed": 2, "finished": False, "is_root": False }, ), ( self.examples[0][0], [1, 3, 999, 1, 4, 999], { "bank": 4, "num_completed": 2, "finished": False, "is_root": True }, ), ( self.examples[0][0], [4, 5, 6, 8], { "bank": 2, "num_completed": 1, "finished": False, "is_root": True }, ), ( self.examples[0][0], # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], { "bank": 14, "num_completed": 6, "finished": True, "is_root": False }, ), ( self.examples[0][0], [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], { "bank": 14, "num_completed": 6, "finished": True, "is_root": True }, ), ( tensorize([[1], [2, 3]]), # Should not be able to get credit for entering 1 a second time [1, 1], { "bank": 1, "num_completed": 1, "finished": False, "is_root": True }, ), ( self.examples[4][0], [1, 2, 1, 2], { "bank": 4, "num_completed": 2, "finished": True, "is_root": False }, ), ( self.examples[4][0], [1, 2, 1, 2, 1], { "bank": 4, "num_completed": 2, "finished": True, "is_root": True }, ), ( 
self.examples[5][0], [1, 2, 3, 4, 5], { "bank": 4, "num_completed": 2, "finished": True, "is_root": True }, ), ] def test_graphs(self): """ Test whether unordered graph systems are created correctly. """ for example in self.examples: constraints, expected, gold_counts = example c = ConstraintNode.create(constraints) assert ConstraintNode.print_graph(c) == expected, f"got {ConstraintNode.print_graph(c)}, expected {expected}" assert c.token_counts() == gold_counts, f"{c} got {c.token_counts()} wanted {gold_counts}" def test_next_tokens(self): """ Tests that the set of next tokens is correct. """ for example in self.examples: constraints, expected, gold_counts = example root = ConstraintNode.create(constraints) root_tokens = set(root.children.keys()) for sequence in constraints: state = UnorderedConstraintState(root) for token in sequence: all_tokens = root_tokens.union(state.node.children.keys()) assert all_tokens == state.next_tokens(), f"ALL {all_tokens} NEXT {state.next_tokens()}" state = state.advance(token) def test_sequences(self): for constraints, tokens, expected in self.sequences: state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert result == expected, f"TEST({tokens}) GOT: {result} WANTED: {expected}" class TestOrderedConstraintState(unittest.TestCase): def setUp(self): self.sequences = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [], { "bank": 0, "num_completed": 0, "finished": False, "is_root": True }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2], { "bank": 2, "num_completed": 0, "finished": False, "is_root": False }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 94], { "bank": 0, "num_completed": 0, "finished": False, "is_root": True }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 3, 999, 1, 4], { "bank": 0, "num_completed": 0, "finished": False, "is_root": True }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 999, 999], { "bank": 3, "num_completed": 1, "finished": False, "is_root": False }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 77, 1, 3, 1], { "bank": 6, "num_completed": 2, "finished": False, "is_root": False }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], { "bank": 14, "num_completed": 6, "finished": True, "is_root": False }, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], { "bank": 14, "num_completed": 6, "finished": True, "is_root": False }, ), ( tensorize([[1], [2, 3]]), [1, 1], { "bank": 1, "num_completed": 1, "finished": False, "is_root": False }, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2], { "bank": 4, "num_completed": 2, "finished": True, "is_root": False }, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2, 1], { "bank": 4, "num_completed": 2, "finished": True, "is_root": False }, ), ( tensorize([[1, 2], [3, 4]]), [1, 2, 3, 4, 5], { "bank": 4, "num_completed": 2, "finished": True, "is_root": False }, ), ] def test_sequences(self): for i, (constraints, tokens, expected) in enumerate(self.sequences): state = OrderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in 
expected.keys(): result[attr] = getattr(state, attr) assert result == expected, f"TEST({tokens}) GOT: {result} WANTED: {expected}" if __name__ == "__main__": unittest.main()
10,242
39.168627
204
py
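A minimal sketch of the constraint-state API that test_constraints.py drives: per-sentence constraints are packed into one tensor, and the search state is advanced token by token. It assumes the same star import from fairseq.token_generation_constraints used by that file; the expected values follow the test's semantics.

constraints = [torch.tensor([1, 2]), torch.tensor([3])]   # two constraints for one sentence
packed = pack_constraints([constraints])[0]               # packed row for that sentence
state = UnorderedConstraintState.create(packed)
for token in [1, 2, 7, 3]:                                # 7 is an unconstrained token
    state = state.advance(token)
print(state.num_completed, state.finished)                # expect: 2 True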
RegularizedBN
RegularizedBN-main/tests/test_sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import unittest from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention class TestSparseMultiheadAttention(unittest.TestCase): def test_sparse_multihead_attention(self): attn_weights = torch.randn(1, 8, 8) bidirectional_sparse_mask = torch.tensor([ [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0] ]) bidirectional_attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=True) bidirectional_attention_sparse_mask = bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8) torch.all(torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)) sparse_mask = torch.tensor([ [0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], ]) attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False) attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8) torch.all(torch.eq(attention_sparse_mask, sparse_mask)) if __name__ == '__main__': unittest.main()
2,545
50.959184
114
py
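A minimal sketch of the mask that test_sparse_multihead_attention.py checks against hand-written tensors: buffered_sparse_mask returns a (tgt_len x src_len) tensor in which float('-inf') marks positions a query must not attend to. Imports are the same as in that test.

attn_weights = torch.randn(1, 8, 8)
attn = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
mask = attn.buffered_sparse_mask(attn_weights, 8, 8)
print(mask.shape)                         # torch.Size([8, 8])
print((mask == float('-inf')).sum())      # count of blocked query/key pairs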
RegularizedBN
RegularizedBN-main/tests/test_export.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.modules import multihead_attention, sinusoidal_positional_embedding from fairseq.tasks.fairseq_task import FairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(FairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ Return a dummy task and argument parser, which can be used to create a model/criterion. """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def _test_save_and_load(scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestExportModels(unittest.TestCase): def test_export_multihead_attention(self): module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) scripted = torch.jit.script(module) _test_save_and_load(scripted) def test_incremental_state_multihead_attention(self): module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module1 = torch.jit.script(module1) module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module2 = torch.jit.script(module2) state = {} state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])}) state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])}) v1 = module1.get_incremental_state(state, "key")["a"] v2 = module2.get_incremental_state(state, "key")["a"] self.assertEqual(v1, 1) self.assertEqual(v2, 2) def test_positional_embedding(self): module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding( embedding_dim=8, padding_idx=1 ) scripted = torch.jit.script(module) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) if __name__ == "__main__": unittest.main()
3,499
31.110092
86
py
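A minimal sketch of the TorchScript round trip that _test_save_and_load wraps in test_export.py, written out inline; it assumes the same imports as that file (tempfile, torch, multihead_attention).

module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(module)               # compile to TorchScript
with tempfile.NamedTemporaryFile() as f:
    scripted.save(f.name)                         # serialize the scripted module
    reloaded = torch.jit.load(f.name)             # and load it back from disk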
RegularizedBN
RegularizedBN-main/tests/test_backtranslation_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator import tests.utils as test_utils class TestBacktranslationDataset(unittest.TestCase): def setUp(self): self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = ( test_utils.sequence_generator_setup() ) dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
4,030
33.452991
90
py
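A minimal sketch of the collater/DataLoader pattern that test_backtranslation_dataset.py relies on: a fairseq dataset is batched by handing its own .collater to a plain torch DataLoader. It reuses the same test_utils setup as that test; whether the LanguagePairDataset is wrapped in BacktranslationDataset or used directly, the batching call looks the same.

tgt_dict, w1, w2, src_tokens, src_lengths, model = test_utils.sequence_generator_setup()
src_dataset = test_utils.TestDataset(data=src_tokens)
pair_dataset = LanguagePairDataset(
    src=src_dataset, src_sizes=src_dataset.sizes, src_dict=tgt_dict,
    tgt=None, tgt_sizes=None, tgt_dict=None,
)
dataloader = torch.utils.data.DataLoader(
    pair_dataset, batch_size=2, collate_fn=pair_dataset.collater,
)
batch = next(iter(dataloader))
# Batches are sorted by source length and left-padded, as noted in the test above.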
RegularizedBN
RegularizedBN-main/tests/test_fp16_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import torch from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU') class TestGradientScaling(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda().half() self.params = list(self.model.parameters()) self.namespace_dls = argparse.Namespace( optimizer='adam', lr=[0.1], adam_betas='(0.9, 0.999)', adam_eps=1e-8, weight_decay=0.0, fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4 ) def run_iter(self, model, params, optimizer): optimizer.zero_grad() y = model(self.x) loss = self.loss_fn(y, self.target) optimizer.backward(loss) self.assertEqual(loss, torch.tensor(1., device='cuda:0', dtype=torch.float16)) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) optimizer.step() self.assertEqual(model.weight, torch.tensor([[3.0996]], device='cuda:0', dtype=torch.float16, requires_grad=True)) self.assertEqual(model.bias, torch.tensor([5.1016], device='cuda:0', dtype=torch.float16, requires_grad=True)) self.assertEqual(optimizer.scaler.loss_scale, 2.) def test_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = FP16Optimizer.build_optimizer(self.namespace_dls, params) self.run_iter(model, params, optimizer) self.assertTrue(torch.all(optimizer.fp32_params.eq(torch.tensor([3.1000, 5.1000], device='cuda:0', requires_grad=True)))) def test_memory_efficient(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.namespace_dls, params) self.run_iter(model, params, optimizer) if __name__ == '__main__': unittest.main()
2,720
33.884615
129
py
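A minimal sketch of the FP16 training step that test_fp16_optimizer.py scripts end to end (it needs a CUDA device, like the test). The namespace fields mirror namespace_dls above; only the toy loss differs.

model = torch.nn.Linear(1, 1).cuda().half()
params = list(model.parameters())
args = argparse.Namespace(
    optimizer='adam', lr=[0.1], adam_betas='(0.9, 0.999)', adam_eps=1e-8,
    weight_decay=0.0, fp16_init_scale=1, fp16_scale_window=1,
    fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4,
)
optimizer = FP16Optimizer.build_optimizer(args, params)
optimizer.zero_grad()
loss = model(torch.tensor([[2.0]]).cuda().half()).sum()
optimizer.backward(loss)                  # loss is scaled before the backward pass
grad_norm = optimizer.clip_grad_norm(0)   # max_norm=0 disables clipping, as in the test
optimizer.step()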
RegularizedBN
RegularizedBN-main/tests/test_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import tests.utils as test_utils import torch from fairseq import search from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.sequence_generator import SequenceGenerator, EnsembleModel from fairseq.tasks.fairseq_task import FairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(FairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitSequenceGeneratorBase(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() eos = self.task.tgt_dict.eos() src_tokens = torch.randint(3, 50, (2, 10)).long() src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1) src_lengths = torch.LongTensor([2, 10]) self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } TransformerModel.add_args(self.parser) args = self.parser.parse_args([]) args.encoder_layers = 2 args.decoder_layers = 1 self.transformer_model = TransformerModel.build_model(args, self.task) def assertOutputEqual(self, hypo, pos_probs): pos_scores = torch.FloatTensor(pos_probs).log() self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores) self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel()) def assertTensorSizeEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def assertHypoEqual(self, h1, h2): "Check two hypos are equal" self.assertTensorEqual(h1["tokens"], h2["tokens"]) self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"]) self.assertLess(abs(h1["score"] - h2["score"]), 1e-6) self.assertAlmostEqual(h1["attention"], h2["attention"]) def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestJitSequeneceGenerator(TestJitSequenceGeneratorBase): @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): model = 
self.transformer_model torch.jit.script(model) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_ensemble_sequence_generator(self): model = self.transformer_model generator = SequenceGenerator( [model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2 ) scripted_model = torch.jit.script(generator) self._test_save_and_load(scripted_model) class TestJitEnsemble(TestJitSequenceGeneratorBase): @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_ensemble_model(self): model = self.transformer_model ensemble_models = EnsembleModel([model]) torch.jit.script(ensemble_models) class TestExportSearch(unittest.TestCase): def setUp(self): task, _ = get_dummy_task_and_parser() self.tgt_dict = task.tgt_dict self.min_top1_prob = 0.4 def test_export_diverse_bs(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) torch.jit.script(search_strategy) def test_export_sampling(self): low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) torch.jit.script(search_strategy) def test_export_diverse_siblings_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) torch.jit.script(search_strategy) class TestSequenceGeneratorBase(unittest.TestCase): def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) class TestSequeneceGenerator(TestSequenceGeneratorBase): def setUp(self): self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = ( test_utils.sequence_generator_setup() ) self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } def test_with_normalization(self): generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6]) def test_without_normalization(self): # Sentence 1: unchanged from the normalized case # Sentence 2: beams swap order generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, normalize_scores=False ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) 
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False) def test_with_lenpen_favoring_short_hypos(self): lenpen = 0.6 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) def test_with_lenpen_favoring_long_hypos(self): lenpen = 5.0 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen) def test_maxlen(self): generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, max_len_b=2) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w2, w2, eos]) self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01]) def test_encoder_with_different_output_len(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict) reshaping_model = test_utils.TestReshapingModel.build_model(args, task) generator = SequenceGenerator([reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2) hypos = generator.forward(self.sample) for sent in [0, 1]: for beam in [0, 1]: assert hypos[sent][beam]['attention'] is not None def test_generation_with_additional_input(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict) add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task) generator = SequenceGenerator([add_input_model], self.tgt_dict, 
beam_size=2) sample = self.sample.copy() sample['net_input']['fancy_other_input'] = sample['net_input']['src_tokens'] hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) class TestDiverseBeamSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor([ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ]) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0. args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ]), # step 1: torch.FloatTensor([ # eos w1 w2 # sentence 1: [0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], # sentence 2: [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4], ]), # step 2: torch.FloatTensor([ # eos w1 w2 # sentence 1: [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], # sentence 2: [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0], ]), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_diverse_beam_search(self): search_strategy = search.DiverseBeamSearch(self.tgt_dict, num_groups=2, diversity_strength=0.) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy, ) sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}} hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9]) class TestDiverseSiblingsSearch(TestDiverseBeamSearch): def assertHypoScore( self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0 ): pos_scores = torch.FloatTensor(pos_probs).log() pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate) self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def test_diverse_beam_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5) 
# sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5) class TestTopPSamplingSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor([ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ]) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0. # The minimal probability of top 2 tokens. self.min_top2_prob = 0.75 # The minimal probability of the top 1 token. self.min_top1_prob = 0.4 w1_prob = self.min_top1_prob w2_prob = self.min_top2_prob - self.min_top1_prob eos_prob = 1 - self.min_top2_prob args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], ]), # step 1: torch.FloatTensor([ # eos w1 w2 [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], ]), # step 2: torch.FloatTensor([ # eos w1 w2 [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], ]), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_topp_sampling_search_low_prob(self): # Given a prob low enough to top-P sampling, we expect only the top # 1 token to be sampled, which always results in the same output. low_sampling_topp = self.min_top1_prob/2.0 search_strategy = search.Sampling(self.tgt_dict, sampling_topp=low_sampling_topp) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy) sample = { 'net_input': { 'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths } } hypos = generator.forward(sample) eos, w1 = self.eos, self.w1 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w1, eos]) self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0]) def test_topp_sampling_search_high_prob(self): # Given a prob high enough to top-P sampling, any of the top 2 # tokens could be sampled. This can cause different outputs. 
high_sampling_topp = (self.min_top1_prob+self.min_top2_prob)/2.0 search_strategy = search.Sampling(self.tgt_dict, sampling_topp=high_sampling_topp) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy) sample = { 'net_input': { 'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertTrue(self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos])) self.assertTrue(self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])) # sentence 1, beam 2 self.assertTrue(self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos])) self.assertTrue(self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])) # sentence 2, beam 1 self.assertTrue(self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos])) self.assertTrue(self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])) # sentence 2, beam 2 self.assertTrue(self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos])) self.assertTrue(self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])) def hypoTokens(self, hypo, tokens): return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens)) def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.): pos_scores = torch.FloatTensor(pos_probs).log() if not self.almostEqual(hypo['positional_scores'], pos_scores): return False if pos_scores.numel() != hypo['tokens'].numel(): return False score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen return abs(score - hypo['score']) < 1e-6 def almostEqual(self, t1, t2): return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4 def tensorEqual(self, t1, t2): return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0 if __name__ == "__main__": unittest.main()
23,549
38.780405
102
py
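The diverse-beam, diverse-siblings and top-p sampling cases in the test file above all share one wiring pattern: construct a strategy from fairseq.search, hand it to SequenceGenerator, and call forward on a sample dict. The sketch below factors that pattern out. It is illustrative only: the generate_with_topp helper name is an assumption, the SequenceGenerator import path is the usual fairseq one, and the model, dictionary and source tensors are supplied by the caller exactly as the tests build them from test_utils.

from fairseq import search
from fairseq.sequence_generator import SequenceGenerator


def generate_with_topp(model, tgt_dict, src_tokens, src_lengths, topp=0.75, beam_size=2):
    # Top-p (nucleus) sampling strategy, as exercised by TestTopPSamplingSearch.
    strategy = search.Sampling(tgt_dict, sampling_topp=topp)
    generator = SequenceGenerator(
        [model], tgt_dict, beam_size=beam_size, search_strategy=strategy
    )
    sample = {"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}}
    # hypos[i][j] is the j-th hypothesis for sentence i: a dict with "tokens",
    # "score" and "positional_scores", which is what the assertions above inspect.
    return generator.forward(sample)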
RegularizedBN
RegularizedBN-main/tests/test_label_smoothing.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import torch from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion import tests.utils as test_utils class TestLabelSmoothing(unittest.TestCase): def setUp(self): # build dictionary self.d = test_utils.dummy_dictionary(3) vocab = len(self.d) self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens self.assertEqual(self.d.pad(), 1) self.assertEqual(self.d.eos(), 2) self.assertEqual(self.d.unk(), 3) pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 # build dataset self.data = [ # the first batch item has padding {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])}, {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])}, ] self.sample = next(test_utils.dummy_dataloader(self.data)) # build model self.args = argparse.Namespace() self.args.sentence_avg = False self.args.probs = torch.FloatTensor([ # pad eos unk w1 w2 w3 [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], ]).unsqueeze(0).expand(2, 3, 7) # add batch dimension self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) self.model = self.task.build_model(self.args) def test_nll_loss(self): self.args.label_smoothing = 0.1 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample) self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6) self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6) def test_padding(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample) def get_one_no_padding(idx): # create a new sample with just a single batch item so that there's # no padding sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) args1 = copy.copy(self.args) args1.probs = args1.probs[idx, :, :].unsqueeze(0) model1 = self.task.build_model(args1) loss1, _, _ = crit(model1, sample1) return loss1 loss1 = get_one_no_padding(0) loss2 = get_one_no_padding(1) self.assertAlmostEqual(loss, loss1 + loss2) def test_reduction(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample, reduce=True) unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) self.assertAlmostEqual(loss, unreduced_loss.sum()) def test_zero_eps(self): self.args.label_smoothing = 0.0 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample) self.assertAlmostEqual(nll_loss, smooth_loss) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") 
self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == '__main__': unittest.main()
4,235
41.36
101
py
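The label-smoothing tests above compare LabelSmoothedCrossEntropyCriterion against plain cross entropy but never spell out the quantity being checked. A simplified reference formulation is sketched below; it ignores padding and fairseq's exact epsilon normalization, so treat it as an illustration of why test_zero_eps expects the two criteria to coincide when label_smoothing = 0.0.

import torch.nn.functional as F


def label_smoothed_loss(logits, target, eps):
    """Simplified per-position label-smoothed loss (illustrative only)."""
    lprobs = F.log_softmax(logits, dim=-1)
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # expected loss under a uniform prior over the vocab
    # With eps == 0 this is exactly the NLL loss used by CrossEntropyCriterion.
    return (1.0 - eps) * nll + eps * smooth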
RegularizedBN
RegularizedBN-main/tests/test_convtbc.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn


class TestConvTBC(unittest.TestCase):

    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == '__main__':
    unittest.main()
1,679
33.285714
102
py
RegularizedBN
RegularizedBN-main/tests/test_token_block_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq.data import TokenBlockDataset

import tests.utils as test_utils


class TestTokenBlockDataset(unittest.TestCase):

    def _build_dataset(self, data, **kwargs):
        sizes = [len(x) for x in data]
        underlying_ds = test_utils.TestDataset(data)
        return TokenBlockDataset(underlying_ds, sizes, **kwargs)

    def test_eos_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [1])
        self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])

        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
        self.assertEqual(ds[2].tolist(), [1])

    def test_block_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='none')
        self.assertEqual(ds[0].tolist(), [5, 4, 3])
        self.assertEqual(ds[1].tolist(), [2, 1, 8])
        self.assertEqual(ds[2].tolist(), [7, 6, 1])
        self.assertEqual(ds[3].tolist(), [9, 1])

    def test_complete_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=6, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])

        data = [
            torch.tensor([4, 3, 2, 1], dtype=torch.long),
            torch.tensor([5, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [5, 1, 1])
        self.assertEqual(ds[2].tolist(), [6, 1])


if __name__ == "__main__":
    unittest.main()
2,970
36.607595
89
py
RegularizedBN
RegularizedBN-main/tests/test_multi_corpus_sampled_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import numpy as np import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset from tests.test_train import mock_dict class TestMultiCorpusSampledDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, expected_sample_from_first_ds_percentage, num_samples=1000, sampling_func=None, ): # To make sure test is not flaky np.random.seed(0) if sampling_func is None: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), ) else: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), sampling_func=sampling_func, ) m.ordered_indices() count_sample_from_first_dataset = 0 for _ in range(num_samples): if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1: count_sample_from_first_dataset += 1 sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / num_samples ) self.assertLess( abs( sample_from_first_ds_percentage - expected_sample_from_first_ds_percentage ), 0.01, ) def test_multi_corpus_sampled_dataset_uniform_sample(self): self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5) def test_multi_corpus_sampled_dataset_weighted_sample(self): def naive_weighted_sample(weights): def f(l): v = np.random.random() agg = 0 for i, weight in enumerate(weights): agg += weight if agg > v: return i return f self._test_sample_helper( expected_sample_from_first_ds_percentage=0.9, sampling_func=naive_weighted_sample(weights=[0.9, 0.1]), )
3,105
31.354167
79
py
RegularizedBN
RegularizedBN-main/tests/test_bmuf.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from multiprocessing import Manager import random import unittest import torch import torch.nn as nn from fairseq import distributed_utils, optim class Model(nn.Module): def __init__(self, input_size, output_size): super(Model, self).__init__() self.fc = nn.Linear(input_size, output_size) def forward(self, input): output = self.fc(input) return output def setup_model_loss_criterion(args, rank, is_cuda): """ setup model, criterion and optimizer based on input args """ args.distributed_rank = rank if args.distributed_world_size > 1: distributed_utils.distributed_init(args) torch.manual_seed(1) model = Model(args.input_size, args.nb_classes) loss_fn = nn.CrossEntropyLoss() if is_cuda: model = model.cuda() loss_fn = loss_fn.cuda() optimizer = optim.sgd.SGD(args, model.parameters()) optimizer = optim.FairseqBMUF(args, optimizer) return model, loss_fn, optimizer def train_step(input, target, model, loss_fn, optimizer, **unused): """Do forward, backward and parameter update.""" model.train() output = model(input) loss = loss_fn(output, target) optimizer.backward(loss) optimizer.step() def single_gpu_training(args, rank, iterations, shared_results): is_cuda = torch.cuda.is_available() if is_cuda: torch.cuda.set_device(rank) model, loss_fn, optimizer = setup_model_loss_criterion(args, rank, is_cuda) for _ in range(iterations): input = torch.randn(1, args.input_size) target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes) if is_cuda: input = input.cuda() target = target.cuda() train_step(input, target, model, loss_fn, optimizer) results = [] for param in model.parameters(): if len(results) == 0: results = param.flatten().cpu().data else: results = torch.cat((results, param.flatten().cpu().data), 0) shared_results[rank] = results def setup_args(): args = argparse.Namespace() args.global_sync_iter = 20 args.block_momentum = 0.875 args.block_lr = 0.5 args.input_size = 5 args.nb_classes = 2 args.batch_size = 1 args.lr = [1e-3] args.momentum = 0 args.weight_decay = 0 args.warmup_iterations = 0 args.use_nbm = True args.average_sync = True args.global_sync_iter = 1 args.model_parallel_size = 1 args.distributed_backend = "gloo" args.distributed_world_size = 2 port = random.randint(10000, 20000) args.distributed_init_method = "tcp://localhost:{port}".format(port=port) args.distributed_init_host = "localhost" args.distributed_port = port + 1 args.local_world_size = args.distributed_world_size return args @unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs") class TestBMUF(unittest.TestCase): def bmuf_process(self, args, iterations): processes = [] results = Manager().dict() ctx = torch.multiprocessing.get_context("spawn") for rank in range(args.distributed_world_size): p = ctx.Process( target=single_gpu_training, args=(args, rank, iterations, results) ) p.start() processes.append(p) for p in processes: p.join() return results def test_bmuf_sync(self): # Train model for 1 iteration and do bmuf sync without doing warmup args = setup_args() iterations = 1 results = self.bmuf_process(args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync(self): # Train model for 20 iteration and do warmup sync without doing bmuf sync args = setup_args() args.warmup_iterations = 20 iterations = 20 
results = self.bmuf_process(args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync_bmuf_sync(self): # Train model for 25 iteration and do warmup sync after 20 iteration # and bmuf sync after 25 iteration args = setup_args() args.warmup_iterations = 20 args.global_sync_iter = 5 iterations = 25 results = self.bmuf_process(args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_single_gpu_bmuf(self): # Train model for 5 iterations and use GPU 1 args = setup_args() args.distributed_world_size = 1 args.warmup_iterations = 5 iterations = 20 results = self.bmuf_process(args, iterations) assert len(results) == 1 def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) if __name__ == '__main__': unittest.main()
5,351
29.758621
88
py
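tests/test_bmuf.py above drives optim.FairseqBMUF with block_lr, block_momentum and global_sync_iter but does not describe what a sync step computes. The sketch below is a schematic of the standard block-wise model update filtering rule (Chen & Huo, 2016) for a single parameter tensor; fairseq's implementation adds warmup, optional Nesterov block momentum (--use-nbm) and --average-sync, so treat this as an assumption-laden illustration rather than the library's code.

import torch


def bmuf_sync_step(global_param, worker_params, delta_prev, block_lr=0.5, block_momentum=0.875):
    """One schematic BMUF synchronization for a single parameter tensor.

    global_param:  parameters broadcast at the previous sync
    worker_params: list of per-worker parameters after global_sync_iter local steps
    delta_prev:    filtered update carried over from the previous sync
    """
    averaged = torch.stack(worker_params).mean(dim=0)   # model averaging across workers
    block_grad = averaged - global_param                # block-level "gradient"
    delta = block_momentum * delta_prev + block_lr * block_grad
    new_global = global_param + delta                   # filtered global update
    return new_global, delta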
RegularizedBN
RegularizedBN-main/tests/test_dictionary.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import tempfile import unittest import torch from fairseq.data import Dictionary class TestDictionary(unittest.TestCase): def test_finalize(self): txt = [ 'A B C D', 'B C D', 'C D', 'D', ] ref_ids1 = list(map(torch.IntTensor, [ [4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2], ])) ref_ids2 = list(map(torch.IntTensor, [ [7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2], ])) # build dictionary d = Dictionary() for line in txt: d.encode_line(line, add_if_not_exist=True) def get_ids(dictionary): ids = [] for line in txt: ids.append(dictionary.encode_line(line, add_if_not_exist=False)) return ids def assertMatch(ids, ref_ids): for toks, ref_toks in zip(ids, ref_ids): self.assertEqual(toks.size(), ref_toks.size()) self.assertEqual(0, (toks != ref_toks).sum().item()) ids = get_ids(d) assertMatch(ids, ref_ids1) # check finalized dictionary d.finalize() finalized_ids = get_ids(d) assertMatch(finalized_ids, ref_ids2) # write to disk and reload with tempfile.NamedTemporaryFile(mode='w') as tmp_dict: d.save(tmp_dict.name) d = Dictionary.load(tmp_dict.name) reload_ids = get_ids(d) assertMatch(reload_ids, ref_ids2) assertMatch(finalized_ids, reload_ids) def test_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999 #fairseq:overwrite\n" "<s> 999 #fairseq:overwrite\n" "</s> 999 #fairseq:overwrite\n" ", 999\n" "▁de 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index('<pad>'), 1) self.assertEqual(d.index('foo'), 3) self.assertEqual(d.index('<unk>'), 4) self.assertEqual(d.index('<s>'), 5) self.assertEqual(d.index('</s>'), 6) self.assertEqual(d.index(','), 7) self.assertEqual(d.index('▁de'), 8) def test_no_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n" ) d = Dictionary() with self.assertRaisesRegex(RuntimeError, 'Duplicate'): d.add_from_file(dict_file) def test_space(self): # for example, character models treat space as a symbol dict_file = io.StringIO( " 999\n" "a 999\n" "b 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index(' '), 4) self.assertEqual(d.index('a'), 5) self.assertEqual(d.index('b'), 6) if __name__ == '__main__': unittest.main()
3,336
27.521368
80
py
RegularizedBN
RegularizedBN-main/tests/test_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq import utils class TestUtils(unittest.TestCase): def test_convert_padding_direction(self): pad = 1 left_pad = torch.LongTensor([ [2, 3, 4, 5, 6], [1, 7, 8, 9, 10], [1, 1, 1, 11, 12], ]) right_pad = torch.LongTensor([ [2, 3, 4, 5, 6], [7, 8, 9, 10, 1], [11, 12, 1, 1, 1], ]) self.assertAlmostEqual( right_pad, utils.convert_padding_direction( left_pad, pad, left_to_right=True, ), ) self.assertAlmostEqual( left_pad, utils.convert_padding_direction( right_pad, pad, right_to_left=True, ), ) def test_make_positions(self): pad = 1 left_pad_input = torch.LongTensor([ [9, 9, 9, 9, 9], [1, 9, 9, 9, 9], [1, 1, 1, 9, 9], ]) left_pad_output = torch.LongTensor([ [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [1, 1, 1, 2, 3], ]) right_pad_input = torch.LongTensor([ [9, 9, 9, 9, 9], [9, 9, 9, 9, 1], [9, 9, 1, 1, 1], ]) right_pad_output = torch.LongTensor([ [2, 3, 4, 5, 6], [2, 3, 4, 5, 1], [2, 3, 1, 1, 1], ]) self.assertAlmostEqual( left_pad_output, utils.make_positions(left_pad_input, pad), ) self.assertAlmostEqual( right_pad_output, utils.make_positions(right_pad_input, pad), ) def test_clip_grad_norm_(self): params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, 0.0) params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)] for p in params: p.grad = torch.full((5,), fill_value=2.) grad_norm = utils.clip_grad_norm_(params, 1.0) exp_grad_norm = torch.full((15,), fill_value=2.).norm() self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, exp_grad_norm) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertAlmostEqual(grad_norm, torch.tensor(1.0)) def test_resolve_max_positions_with_tuple(self): resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000) self.assertEqual(resolved, (2000, 100, 2000)) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4) if __name__ == '__main__': unittest.main()
3,067
28.219048
78
py
RegularizedBN
RegularizedBN-main/tests/test_character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest

from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder


class TestCharacterTokenEmbedder(unittest.TestCase):
    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol('hello')
        vocab.add_symbol('there')

        embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)

        test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
        max_len = max(len(s) for s in test_sents)
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            input[i][j + 2] = vocab.eos()

        embs = embedder(input)

        assert embs.size() == (len(test_sents), max_len + 2, 5)
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])

        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == '__main__':
    unittest.main()
1,656
34.255319
96
py
RegularizedBN
RegularizedBN-main/tests/gpu/test_binaries_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os import tempfile import unittest from io import StringIO import torch from fairseq import options from fairseq_cli import train from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_translation_data, train_translation_model, ) class TestTranslationGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en", ["--fp16"]) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_memory_efficient_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"] ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_transformer_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--fp16", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_levenshtein_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_levenshtein_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "levenshtein_transformer", [ "--apply-bert-init", "--early-exit", "6,6,6", "--criterion", "nat_loss", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", 0, ] + (extra_flags or []), ) train.main(train_args) # try scalar quantization scalar_quant_train_parser = options.get_training_parser() scalar_quant_train_args = options.parse_args_and_arch( scalar_quant_train_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", 
data_dir, "--max-update", "3", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", 0, "--quant-noise-scalar", "0.5", ] + (extra_flags or []), ) train.main(scalar_quant_train_args) # try iterative PQ quantization quantize_parser = options.get_training_parser() quantize_args = options.parse_args_and_arch( quantize_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "50", "--tokens-per-sample", "50", "--max-update", "6", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", 0, "--restore-file", os.path.join(data_dir, "checkpoint_last.pt"), "--reset-optimizer", "--quantization-config-path", os.path.join( os.path.dirname(__file__), "transformer_quantization_config.yaml" ), ] + (extra_flags or []), ) train.main(quantize_args) class TestQuantization(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_quantization(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_quantization") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) # tests both scalar and iterative PQ quantization _quantize_language_model(data_dir, "transformer_lm") class TestOptimizersGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_flat_grads(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_flat_grads") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) with self.assertRaises(RuntimeError): # adafactor isn't compatible with flat grads, which # are used by default with --fp16 train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", ], ) # but it should pass once we set --fp16-no-flatten-grads train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", "--fp16-no-flatten-grads", ], ) if __name__ == "__main__": unittest.main()
9,920
31.421569
92
py
RegularizedBN
RegularizedBN-main/tests/speech_recognition/test_data_utils.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from examples.speech_recognition.data import data_utils


class DataUtilsTest(unittest.TestCase):
    def test_normalization(self):
        sample_len1 = torch.tensor(
            [
                [
                    -0.7661, -1.3889, -2.0972, -0.9134, -0.7071, -0.9765, -0.8700,
                    -0.8283, 0.7512, 1.3211, 2.1532, 2.1174, 1.2800, 1.2633, 1.6147,
                    1.6322, 2.0723, 3.1522, 3.2852, 2.2309, 2.5569, 2.2183, 2.2862,
                    1.5886, 0.8773, 0.8725, 1.2662, 0.9899, 1.1069, 1.3926, 1.2795,
                    1.1199, 1.1477, 1.2687, 1.3843, 1.1903, 0.8355, 1.1367, 1.2639,
                    1.4707,
                ]
            ]
        )
        out = data_utils.apply_mv_norm(sample_len1)
        assert not torch.isnan(out).any()
        assert (out == sample_len1).all()
1,039
42.333333
108
py
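The test above only asserts that apply_mv_norm is NaN-free and acts as the identity on a one-frame utterance. As a rough mental model, mean/variance (CMVN-style) normalization standardizes each feature dimension over time, which is undefined for a single frame. The sketch below is an illustrative re-implementation of that idea, not the function from examples.speech_recognition.data.data_utils; the helper name and eps parameter are assumptions.

import torch


def mv_norm_reference(feats, eps=1e-8):
    """Illustrative per-utterance mean/variance normalization for (T, D) features."""
    if feats.size(0) <= 1:
        # Variance over a single frame is undefined, so pass the input through;
        # this is the behaviour the unit test above relies on.
        return feats
    mean = feats.mean(dim=0, keepdim=True)
    std = feats.std(dim=0, keepdim=True)
    return (feats - mean) / (std + eps)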
RegularizedBN
RegularizedBN-main/tests/speech_recognition/asr_test_base.py
#!/usr/bin/env python3 import argparse import os import unittest from inspect import currentframe, getframeinfo import numpy as np import torch from fairseq.data import data_utils as fairseq_data_utils from fairseq.data.dictionary import Dictionary from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqModel, ) from fairseq.tasks.fairseq_task import FairseqTask from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask DEFAULT_TEST_VOCAB_SIZE = 100 # /////////////////////////////////////////////////////////////////////////// # utility function to setup dummy dict/task/input # /////////////////////////////////////////////////////////////////////////// def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict class DummyTask(FairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.tgt_dict = self.dictionary @property def target_dictionary(self): return self.dictionary def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def get_dummy_input(T=100, D=80, B=5, K=100): forward_input = {} # T max sequence length # D feature vector dimension # B batch size # K target dimension size feature = torch.randn(B, T, D) # this (B, T, D) layout is just a convention, you can override it by # write your own _prepare_forward_input function src_lengths = torch.from_numpy( np.random.randint(low=1, high=T, size=B, dtype=np.int64) ) src_lengths[0] = T # make sure the maximum length matches prev_output_tokens = [] for b in range(B): token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1) tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64) prev_output_tokens.append(torch.from_numpy(tokens)) prev_output_tokens = fairseq_data_utils.collate_tokens( prev_output_tokens, pad_idx=1, eos_idx=2, left_pad=False, move_eos_to_beginning=False, ) src_lengths, sorted_order = src_lengths.sort(descending=True) forward_input["src_tokens"] = feature.index_select(0, sorted_order) forward_input["src_lengths"] = src_lengths forward_input["prev_output_tokens"] = prev_output_tokens return forward_input def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)): """ This only provides an example to generate dummy encoder output """ (T, B, D) = encoder_out_shape encoder_out = {} encoder_out["encoder_out"] = torch.from_numpy( np.random.randn(*encoder_out_shape).astype(np.float32) ) seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B)) # some dummy mask encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand( B, -1 ) >= seq_lengths.view(B, 1).expand(-1, T) encoder_out["encoder_padding_mask"].t_() # encoer_padding_mask is (T, B) tensor, with (t, b)-th element indicate # whether encoder_out[t, b] 
is valid (=0) or not (=1) return encoder_out def _current_postion_info(): cf = currentframe() frameinfo = " (at {}:{})".format( os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno ) return frameinfo def check_encoder_output(encoder_output, batch_size=None): """we expect encoder_output to be a dict with the following key/value pairs: - encoder_out: a Torch.Tensor - encoder_padding_mask: a binary Torch.Tensor """ if not isinstance(encoder_output, dict): msg = ( "FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info() ) return False, msg if "encoder_out" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_out" + _current_postion_info() ) return False, msg if "encoder_padding_mask" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_padding_mask" + _current_postion_info() ) return False, msg if not isinstance(encoder_output["encoder_out"], torch.Tensor): msg = "encoder_out must be a torch.Tensor" + _current_postion_info() return False, msg if encoder_output["encoder_out"].dtype != torch.float32: msg = "encoder_out must have float32 dtype" + _current_postion_info() return False, msg mask = encoder_output["encoder_padding_mask"] if mask is not None: if not isinstance(mask, torch.Tensor): msg = ( "encoder_padding_mask must be a torch.Tensor" + _current_postion_info() ) return False, msg if ( mask.dtype != torch.uint8 and (not hasattr(torch, 'bool') or mask.dtype != torch.bool) ): msg = ( "encoder_padding_mask must have dtype of uint8" + _current_postion_info() ) return False, msg if mask.dim() != 2: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)" + _current_postion_info() ) return False, msg if batch_size is not None and mask.size(1) != batch_size: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, with size(1)" + " being the batch size" + _current_postion_info() ) return False, msg return True, None def check_decoder_output(decoder_output): """we expect output from a decoder is a tuple with the following constraint: - the first element is a torch.Tensor - the second element can be anything (reserved for future use) """ if not isinstance(decoder_output, tuple): msg = "FariseqDecoder output must be a tuple" + _current_postion_info() return False, msg if len(decoder_output) != 2: msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info() return False, msg if not isinstance(decoder_output[0], torch.Tensor): msg = ( "FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info() ) return False, msg return True, None # /////////////////////////////////////////////////////////////////////////// # Base Test class # /////////////////////////////////////////////////////////////////////////// class TestBaseFairseqModelBase(unittest.TestCase): """ This class is used to facilitate writing unittest for any class derived from `BaseFairseqModel`. 
""" @classmethod def setUpClass(cls): if cls is TestBaseFairseqModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model): self.assertTrue(isinstance(model, BaseFairseqModel)) self.model = model def setupInput(self): pass def setUp(self): self.model = None self.forward_input = None pass class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase): """ base code to test FairseqEncoderDecoderModel (formally known as `FairseqModel`) must be derived from this base class """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderDecoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)), msg="This class only tests for FairseqModel subclasses", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input def setUp(self): super().setUp() def test_forward(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) # for FairseqEncoderDecoderModel, forward returns a tuple of two # elements, the first one is a Torch.Tensor succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderModelBase(TestBaseFairseqModelBase): """ base class to test FairseqEncoderModel """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, FairseqEncoderModel), msg="This class is only used for testing FairseqEncoderModel", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): super().setUp() def test_forward(self): if self.forward_input and self.model: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.model.forward(**self.forward_input) # we expect forward_output to be a dict with the following # key/value pairs: # - encoder_out: a Torch.Tensor # - encoder_padding_mask: a binary Torch.Tensor succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderBase(unittest.TestCase): """ base class to test FairseqEncoder """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpEncoder(self, encoder): self.assertTrue( isinstance(encoder, FairseqEncoder), msg="This class is only used for test FairseqEncoder", ) self.encoder = encoder def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): self.encoder = None self.forward_input = None def test_forward(self): if self.encoder and self.forward_input: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.encoder.forward(**self.forward_input) succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output class TestFairseqDecoderBase(unittest.TestCase): """ base class to test FairseqDecoder """ @classmethod def setUpClass(cls): if cls is TestFairseqDecoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpDecoder(self, decoder): self.assertTrue( isinstance(decoder, FairseqDecoder), msg="This class is only used for test FairseqDecoder", ) self.decoder = decoder def setUpInput(self, input=None): self.forward_input = get_dummy_encoder_output() if input is None else input def setUpPrevOutputTokens(self, tokens=None): if tokens is None: self.encoder_input = get_dummy_input() self.prev_output_tokens = self.encoder_input["prev_output_tokens"] else: self.prev_output_tokens = tokens def setUp(self): self.decoder = None self.forward_input = None self.prev_output_tokens = None def test_forward(self): if ( self.decoder is not None and self.forward_input is not None and self.prev_output_tokens is not None ): forward_output = self.decoder.forward( prev_output_tokens=self.prev_output_tokens, encoder_out=self.forward_input, ) succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_input = forward_output class DummyEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @classmethod def build_model(cls, args, task): return cls(DummyEncoder()) def get_logits(self, net_output): # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as # F.binary_cross_entropy_with_logits combines sigmoid and CE return torch.log( torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"]) ) def get_normalized_probs(self, net_output, log_probs, sample=None): lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample) lprobs.batch_first = True return lprobs class DummyEncoder(FairseqEncoder): def __init__(self): super().__init__(None) def forward(self, src_tokens, src_lengths): mask, max_len = lengths_to_encoder_padding_mask(src_lengths) return {"encoder_out": src_tokens, "encoder_padding_mask": mask} class CrossEntropyCriterionTestBase(unittest.TestCase): @classmethod def setUpClass(cls): if cls is 
CrossEntropyCriterionTestBase: raise unittest.SkipTest("Skipping base class test case") super().setUpClass() def setUpArgs(self): args = argparse.Namespace() args.sentence_avg = False args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion return args def setUp(self): args = self.setUpArgs() self.model = DummyEncoderModel(encoder=DummyEncoder()) self.criterion = self.criterion_cls.build_criterion(args=args, task=DummyTask(args)) def get_src_tokens(self, correct_prediction, aggregate): """ correct_prediction: True if the net_output (src_tokens) should predict the correct target aggregate: True if the criterion expects net_output (src_tokens) aggregated across time axis """ predicted_idx = 0 if correct_prediction else 1 if aggregate: src_tokens = torch.zeros((2, 2), dtype=torch.float) for b in range(2): src_tokens[b][predicted_idx] = 1.0 else: src_tokens = torch.zeros((2, 10, 2), dtype=torch.float) for b in range(2): for t in range(10): src_tokens[b][t][predicted_idx] = 1.0 return src_tokens def get_target(self, soft_target): if soft_target: target = torch.zeros((2, 2), dtype=torch.float) for b in range(2): target[b][0] = 1.0 else: target = torch.zeros((2, 10), dtype=torch.long) return target def get_test_sample(self, correct, soft_target, aggregate): src_tokens = self.get_src_tokens(correct, aggregate) target = self.get_target(soft_target) L = src_tokens.size(1) return { "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])}, "target": target, "ntokens": src_tokens.size(0) * src_tokens.size(1), }
19,491
33.9319
92
py
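The docstrings in asr_test_base.py say the Test*Base classes "must be derived from", but no derivation is shown. Below is a minimal sketch of how a concrete test case could reuse that machinery, here with the file's own DummyEncoderModel as a stand-in; the import path assumes the repo's tests package is importable, and a real test would substitute a registered FairseqEncoderModel.

import unittest

from tests.speech_recognition.asr_test_base import (
    DummyEncoderModel,
    TestFairseqEncoderModelBase,
    get_dummy_input,
)


class TestDummyEncoderModel(TestFairseqEncoderModelBase):
    def setUp(self):
        super().setUp()
        # setUpModel builds the model through the dummy task/parser helpers,
        # so any class exposing add_args/build_model can be dropped in here.
        self.setUpModel(DummyEncoderModel)
        # Smaller dummy shapes keep the forward pass cheap.
        self.setUpInput(get_dummy_input(T=20, D=8, B=2, K=10))


if __name__ == "__main__":
    unittest.main()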
RegularizedBN
RegularizedBN-main/tests/speech_recognition/test_collaters.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import numpy as np
import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater


class TestSeq2SeqCollator(unittest.TestCase):
    def test_collate(self):
        eos_idx = 1
        pad_idx = 0
        collater = Seq2SeqCollater(
            feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
        )

        # 2 frames in the first sample and 3 frames in the second one
        frames1 = np.array([[7, 8], [9, 10]])
        frames2 = np.array([[1, 2], [3, 4], [5, 6]])
        target1 = np.array([4, 2, 3, eos_idx])
        target2 = np.array([3, 2, eos_idx])
        sample1 = {"id": 0, "data": [frames1, target1]}
        sample2 = {"id": 1, "data": [frames2, target2]}
        batch = collater.collate([sample1, sample2])

        # the collater sorts inputs by frame length before creating the batch
        self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
        self.assertEqual(batch["ntokens"], 7)
        self.assertTensorEqual(
            batch["net_input"]["src_tokens"],
            torch.tensor(
                [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
            ),
        )
        self.assertTensorEqual(
            batch["net_input"]["prev_output_tokens"],
            torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
        )
        self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
        self.assertTensorEqual(
            batch["target"],
            torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
        )
        self.assertEqual(batch["nsentences"], 2)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == "__main__":
    unittest.main()
2,048
33.728814
87
py
RegularizedBN
RegularizedBN-main/fairseq/checkpoint_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import logging import os import re import traceback from collections import OrderedDict from typing import Union import torch from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from torch.serialization import default_restore_location logger = logging.getLogger(__name__) def save_checkpoint(args, trainer, epoch_itr, val_loss): from fairseq import distributed_utils, meters # only one worker should attempt to create the required dir if args.distributed_rank == 0: os.makedirs(args.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) #prev_best = 0 if val_loss is not None: best_function = max if args.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if args.no_save or not trainer.is_data_parallel_master: return def is_better(a, b): return a >= b if args.maximize_best_checkpoint_metric else a <= b write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() #print("get here!");exit() suffix = getattr(args, "checkpoint_suffix", "") checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( end_of_epoch and not args.no_epoch_checkpoints and epoch % args.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and args.save_interval_updates > 0 and updates % args.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and args.keep_best_checkpoints > 0: checkpoint_conds["checkpoint.best_{}_{:.2f}.pt".format( args.best_checkpoint_metric, val_loss)] = ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) checkpoint_conds["checkpoint_last{}.pt".format(suffix)] = not args.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) checkpoints = [ os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond ] if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: PathManager.copy(checkpoints[0], cp, overwrite=True) write_timer.stop() logger.info( "saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if not end_of_epoch and args.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt" ) for old_chk in checkpoints[args.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) if args.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt") for old_chk in checkpoints[args.keep_last_epochs :]: if os.path.lexists(old_chk): os.remove(old_chk) if args.keep_best_checkpoints > 0: # only keep the best N checkpoints according to validation metric checkpoints = 
checkpoint_paths( args.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(args.best_checkpoint_metric)) if not args.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[args.keep_best_checkpoints:]: if os.path.lexists(old_chk): os.remove(old_chk) def load_checkpoint(args, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. """ reset_optimizer = args.reset_optimizer reset_lr_scheduler = args.reset_lr_scheduler optimizer_overrides = eval(args.optimizer_overrides) reset_meters = args.reset_meters reset_dataloader = args.reset_dataloader if getattr(args, 'finetune_from_model', None) is not None \ and (reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader): raise ValueError("--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader") suffix = getattr(args, "checkpoint_suffix", "") if args.restore_file == "checkpoint_last.pt": # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join(args.save_dir, "checkpoint_last{}.pt".format(suffix)) first_launch = not PathManager.exists(checkpoint_path) if getattr(args, 'finetune_from_model', None) is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. if PathManager.exists(args.finetune_from_model): checkpoint_path = args.finetune_from_model reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info(f'loading pretrained model from {checkpoint_path}: ' 'optimizer, lr scheduler, meters, dataloader will be reset') else: raise ValueError(f'--funetune-from-model {args.finetune_from_model} does not exist') elif getattr(args, "model_parallel_size", 1) > 1: checkpoint_path = args.restore_file.replace(".pt", suffix + ".pt") else: checkpoint_path = args.restore_file if args.restore_file != "checkpoint_last.pt" and getattr(args, 'finetune_from_model', None): raise ValueError( '--finetune-from-model and --restore-file (non-default value) ' 'can not be specified together: ' + str(args)) extra_state = trainer.load_checkpoint( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters, ) if ( extra_state is not None and "best" in extra_state and not reset_optimizer and not reset_meters ): save_checkpoint.best = extra_state["best"] if extra_state is not None and not reset_dataloader: # restore iterator from checkpoint itr_state = extra_state["train_iterator"] epoch_itr = trainer.get_train_iterator( epoch=itr_state["epoch"], load_dataset=True, **passthrough_args ) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator( epoch=1, load_dataset=True, **passthrough_args ) trainer.lr_step(epoch_itr.epoch) return extra_state, epoch_itr def load_checkpoint_to_cpu(path, arg_overrides=None): """Loads a checkpoint to CPU (with upgrading for backward compatibility).""" with PathManager.open(path, "rb") as f: state = torch.load( f, map_location=lambda s, l: default_restore_location(s, "cpu") ) args = state["args"] #print(args);exit() if arg_overrides is not None: for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) state = _upgrade_state_dict(state) return state def load_model_ensemble(filenames, 
arg_overrides=None, task=None, strict=True, suffix=''): """Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading """ ensemble, args, _task = load_model_ensemble_and_task( filenames, arg_overrides, task, strict, suffix, ) return ensemble, args def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None, strict=True, suffix=''): from fairseq import tasks ensemble = [] for filename in filenames: filename = filename.replace(".pt", suffix + ".pt") if not PathManager.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = load_checkpoint_to_cpu(filename, arg_overrides) #print(task) #<fairseq.tasks.translation.TranslationTask object at 0x7f9d48fbca90> args = state["args"] if task is None: task = tasks.setup_task(args) # build model for ensemble #print(args) model = task.build_model(args) model.load_state_dict(state["model"], strict=strict, args=args) ensemble.append(model) return ensemble, args, task def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"): """Retrieves all checkpoints found in `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order. """ pt_regexp = re.compile(pattern) files = os.listdir(path) entries = [] for i, f in enumerate(files): m = pt_regexp.fullmatch(f) if m is not None: idx = float(m.group(1)) if len(m.groups()) > 0 else i entries.append((idx, m.group(0))) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] def torch_persistent_save(obj, f): if isinstance(f, str): with PathManager.open(f, "wb") as h: torch_persistent_save(obj, h) return for i in range(3): try: return torch.save(obj, f) except Exception: if i == 2: logger.error(traceback.format_exc()) def save_state( filename, args, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None, ): from fairseq import utils if optim_history is None: optim_history = [] if extra_state is None: extra_state = {} state_dict = { "args": args, "model": model_state_dict or {}, "optimizer_history": optim_history + [ { "criterion_name": criterion.__class__.__name__, "optimizer_name": optimizer.__class__.__name__, "lr_scheduler_state": lr_scheduler.state_dict(), "num_updates": num_updates, } ], "extra_state": extra_state, } if utils.has_parameters(criterion): state_dict["criterion"] = criterion.state_dict() if not args.no_save_optimizer_state: state_dict["last_optimizer_state"] = optimizer.state_dict() # convert all state to CPU state_dict = utils.move_to_cpu(state_dict) with PathManager.open(filename, "wb") as f: torch_persistent_save(state_dict, f) def _upgrade_state_dict(state): """Helper for upgrading old model checkpoints.""" from fairseq import models, registry, tasks # add optimizer_history if "optimizer_history" not in state: state["optimizer_history"] = [ {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} ] state["last_optimizer_state"] = state["optimizer"] del state["optimizer"] del state["best_loss"] # move extra_state into sub-dictionary if "epoch" in state and "extra_state" not in state: state["extra_state"] = { "epoch": state["epoch"], "batch_offset": state["batch_offset"], "val_loss": state["val_loss"], } del state["epoch"] del state["batch_offset"] del 
state["val_loss"] # reduce optimizer history's memory usage (only keep the last state) if "optimizer" in state["optimizer_history"][-1]: state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] for optim_hist in state["optimizer_history"]: del optim_hist["optimizer"] # record the optimizer class name if "optimizer_name" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" # move best_loss into lr_scheduler_state if "lr_scheduler_state" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["lr_scheduler_state"] = { "best": state["optimizer_history"][-1]["best_loss"] } del state["optimizer_history"][-1]["best_loss"] # keep track of number of updates if "num_updates" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["num_updates"] = 0 # old model checkpoints may not have separate source/target positions if hasattr(state["args"], "max_positions") and not hasattr( state["args"], "max_source_positions" ): state["args"].max_source_positions = state["args"].max_positions state["args"].max_target_positions = state["args"].max_positions # use stateful training data iterator if "train_iterator" not in state["extra_state"]: state["extra_state"]["train_iterator"] = { "epoch": state["extra_state"]["epoch"], "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), } # default to translation task if not hasattr(state["args"], "task"): state["args"].task = "translation" # --raw-text and --lazy-load are deprecated if getattr(state["args"], "raw_text", False): state["args"].dataset_impl = "raw" elif getattr(state["args"], "lazy_load", False): state["args"].dataset_impl = "lazy" # epochs start at 1 if state["extra_state"]["train_iterator"] is not None: state["extra_state"]["train_iterator"]["epoch"] = max( state["extra_state"]["train_iterator"].get("epoch", 1), 1, ) # set any missing default values in the task, model or other registries registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task]) registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch]) for registry_name, REGISTRY in registry.REGISTRIES.items(): choice = getattr(state["args"], registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] registry.set_defaults(state["args"], cls) return state def prune_state_dict(state_dict, args): """Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. """ if not args or args.arch == "ptt_transformer": # args should not be none, but don't crash if it is. 
return state_dict encoder_layers_to_keep = ( args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None ) decoder_layers_to_keep = ( args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None ) if not encoder_layers_to_keep and not decoder_layers_to_keep: return state_dict # apply pruning logger.info( "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" ) def create_pruning_pass(layers_to_keep, layer_name): keep_layers = sorted( [int(layer_string) for layer_string in layers_to_keep.split(",")] ) mapping_dict = {} for i in range(len(keep_layers)): mapping_dict[str(keep_layers[i])] = str(i) regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) return {"substitution_regex": regex, "mapping_dict": mapping_dict} pruning_passes = [] if encoder_layers_to_keep: pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) if decoder_layers_to_keep: pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) new_state_dict = {} for layer_name in state_dict.keys(): match = re.search(r"\.layers\.(\d+)\.", layer_name) # if layer has no number in it, it is a supporting layer, such as an # embedding if not match: new_state_dict[layer_name] = state_dict[layer_name] continue # otherwise, layer should be pruned. original_layer_number = match.group(1) # figure out which mapping dict to replace from for pruning_pass in pruning_passes: if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ "substitution_regex" ].search(layer_name): new_layer_number = pruning_pass["mapping_dict"][original_layer_number] substitution_match = pruning_pass["substitution_regex"].search( layer_name ) new_state_key = ( layer_name[: substitution_match.start(1)] + new_layer_number + layer_name[substitution_match.end(1) :] ) new_state_dict[new_state_key] = state_dict[layer_name] # Since layers are now pruned, *_layers_to_keep are no longer needed. # This is more of "It would make it work fix" rather than a proper fix. if "encoder_layers_to_keep" in vars(args): args.encoder_layers_to_keep = None if "decoder_layers_to_keep" in vars(args): args.decoder_layers_to_keep = None return new_state_dict def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str ): """ Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. """ if not PathManager.exists(checkpoint): raise IOError("Model file not found: {}".format(checkpoint)) state = load_checkpoint_to_cpu(checkpoint) if isinstance(component, FairseqEncoder): component_type = "encoder" elif isinstance(component, FairseqDecoder): component_type = "decoder" else: raise ValueError( "component to load must be either a FairseqEncoder or " "FairseqDecoder. Loading other component types are not supported." 
) component_state_dict = OrderedDict() for key in state["model"].keys(): if key.startswith(component_type): # encoder.input_layers.0.0.weight --> input_layers.0.0.weight component_subkey = key[len(component_type) + 1 :] component_state_dict[component_subkey] = state["model"][key] component.load_state_dict(component_state_dict, strict=True) return component def verify_checkpoint_directory(save_dir: str) -> None: if not os.path.exists(save_dir): os.makedirs(save_dir, exist_ok=True) temp_file_path = os.path.join(save_dir, "dummy") try: with open(temp_file_path, "w"): pass except OSError as e: logger.warning("Unable to access checkpoint save directory: {}".format(save_dir)) raise e else: os.remove(temp_file_path)
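# ---------------------------------------------------------------------------
# Editor's note: the snippet below is an illustrative usage sketch, not part
# of the original fairseq module. It demonstrates how `checkpoint_paths`
# orders files matching its default pattern; the temporary directory and the
# checkpoint file names are made up for the demo.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        for name in ["checkpoint1.pt", "checkpoint10.pt", "checkpoint2.pt"]:
            open(os.path.join(tmp_dir, name), "w").close()
        # Sorted by the numeric capture group in descending order:
        # checkpoint10.pt, checkpoint2.pt, checkpoint1.pt
        for ckpt in checkpoint_paths(tmp_dir):
            print(ckpt)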
20546
37.84121
114
py
RegularizedBN
RegularizedBN-main/fairseq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import copy import importlib.util import logging import math import os import sys import warnings from collections import defaultdict from itertools import accumulate from typing import Callable, Dict, List, Optional import numpy as np import torch import torch.nn.functional as F from fairseq.data import iterators from fairseq.logging.meters import safe_round from fairseq.modules import gelu, gelu_accurate from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor try: from amp_C import multi_tensor_l2norm multi_tensor_l2norm_available = True except ImportError: multi_tensor_l2norm_available = False logger = logging.getLogger(__name__) MANIFOLD_PATH_SEP = "|" def split_paths(paths: str) -> List[str]: return paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP) def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): from fairseq import checkpoint_utils deprecation_warning( "utils.load_ensemble_for_inference is deprecated. " "Please use checkpoint_utils.load_model_ensemble instead." ) return checkpoint_utils.load_model_ensemble( filenames, arg_overrides=model_arg_overrides, task=task ) def apply_to_sample(f, sample): if hasattr(sample, '__len__') and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample): def _move_to_cuda(tensor): return tensor.cuda() return apply_to_sample(_move_to_cuda, sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample) def get_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" return module.get_incremental_state(incremental_state, key) def set_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: result = module.set_incremental_state(incremental_state, key, value) if result is not None: incremental_state = result return incremental_state def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str) and len(replace_unk) > 0: # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, "r") as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. 
align_dict = {} return align_dict def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict))) def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor( [float(weight) for weight in pieces[1:]] ) return embed_dict def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. hypo_tokens[i] = align_dict.get(src_token, src_token) return " ".join(hypo_tokens) def post_process_prediction( hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None ): hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore) if align_dict is not None: hypo_str = replace_unk( hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string() ) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. 
mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def strip_pad(tensor, pad): return tensor[tensor.ne(pad)] def buffered_arange(max): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def convert_padding_direction( src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False ): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) buffered = torch.empty(0).long() if max_len > 0: torch.arange(max_len, out=buffered) range = buffered.type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) return src_tokens.gather(1, index) def item(tensor): if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor: per_device_grads = {} norms = [] for grad in grads: device = grad.device cur_device_grads = per_device_grads.get(device) if cur_device_grads is None: cur_device_grads = [] per_device_grads[device] = cur_device_grads cur_device_grads.append(grad) for device in per_device_grads.keys(): cur_device_grads = per_device_grads[device] if device.type == "cuda": # TODO(msb) return has_inf has_inf = torch.zeros((1, 1), dtype=torch.int, device=device) with torch.cuda.device(device): norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False) norms.append(norm[0]) else: norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads] total_norm = torch.norm(torch.stack(norms)) return total_norm def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor: if isinstance(params, torch.Tensor): params = [params] params = list(params) grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)] if len(grads) == 0: if len(params) > 0: return params[0].new_tensor(0.) else: return torch.tensor(0.) 
if len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) total_norm = torch.norm( torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads]) ) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) for g in grads: g.mul_(clip_coef) return total_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t) def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions def import_user_module(args): module_path = getattr(args, "user_dir", None) if module_path is not None: module_path = os.path.abspath(args.user_dir) if not os.path.exists(module_path): fairseq_rel_path = os.path.join( os.path.dirname(__file__), "..", args.user_dir ) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path module_parent, module_name = os.path.split(module_path) if module_name not in sys.modules: sys.path.insert(0, module_parent) importlib.import_module(module_name) def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32) def get_perplexity(loss, round=2, base=2): if loss is None: return 0. 
try: return safe_round(base ** loss, round) except OverflowError: return float('inf') def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def get_activation_fn(activation: str) -> Callable: """ Returns the activation function corresponding to `activation` """ if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ] @contextlib.contextmanager def eval(model): is_training = model.training model.eval() yield model.train(is_training) def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False def set_torch_seed(seed): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints assert isinstance(seed, int) torch.manual_seed(seed) torch.cuda.manual_seed(seed) @contextlib.contextmanager def with_torch_seed(seed): assert isinstance(seed, int) rng_state = torch.get_rng_state() cuda_rng_state = torch.cuda.get_rng_state() set_torch_seed(seed) yield torch.set_rng_state(rng_state) torch.cuda.set_rng_state(cuda_rng_state) def parse_alignment(line): """ Parses a single line from the alingment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). """ alignments = line.strip().split() parsed_alignment = torch.IntTensor(2 * len(alignments)) for idx, alignment in enumerate(alignments): src_idx, tgt_idx = alignment.split("-") parsed_alignment[2 * idx] = int(src_idx) parsed_alignment[2 * idx + 1] = int(tgt_idx) return parsed_alignment def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. 
""" if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def get_tpu_device(args): import torch_xla.core.xla_model as xm return xm.xla_device() def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), ) class CudaEnvironment(object): def __init__(self): cur_device = torch.cuda.current_device() prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device)) self.name = prop.name self.major = prop.major self.minor = prop.minor self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024 @staticmethod def pretty_print_cuda_env_list(cuda_env_list): """ Given a list of CudaEnviorments, pretty print them """ num_workers = len(cuda_env_list) center = "CUDA enviroments for all {} workers".format(num_workers) banner_len = 40 - len(center) // 2 first_line = "*" * banner_len + center + "*" * banner_len logger.info(first_line) for r, env in enumerate(cuda_env_list): logger.info( "rank {:3d}: ".format(r) + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor) + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB) + "name = {:40s}".format(env.name) ) logger.info(first_line)
19882
31.864463
111
py
RegularizedBN
RegularizedBN-main/fairseq/hub_utils.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import os from typing import List, Dict, Iterator, Tuple, Any import torch from torch import nn from fairseq import utils from fairseq.data import encoders logger = logging.getLogger(__name__) def from_pretrained( model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils #print("here") if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == 'checkpoint_file': checkpoint_file = v elif ( k != 'path' # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path['path'] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith('.'): kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs['data'] = file_utils.load_archive_file(data_name_or_path) for file, arg in { 'code': 'bpe_codes', 'bpecodes': 'bpe_codes', 'sentencepiece.bpe.model': 'sentencepiece_model', }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if 'user_dir' in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir'])) #print("here2") models, args, task = checkpoint_utils.load_model_ensemble_and_task( [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs, ) #print("here3") return { 'args': args, 'task': task, 'models': models, } class GeneratorHubInterface(nn.Module): """ PyTorch Hub interface for generating sequences from a pre-trained translation or language model. 
""" def __init__(self, args, task, models): super().__init__() self.args = args self.task = task self.models = nn.ModuleList(models) self.src_dict = task.source_dictionary self.tgt_dict = task.target_dictionary # optimize model for generation for model in self.models: model.prepare_for_inference_(args) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None)) self.tokenizer = encoders.build_tokenizer(args) self.bpe = encoders.build_bpe(args) self.max_positions = utils.resolve_max_positions( self.task.max_positions(), *[model.max_positions() for model in models] ) # this is useful for determining the device self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]: return self.sample(sentences, beam, verbose, **kwargs) def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]: if isinstance(sentences, str): return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0] tokenized_sentences = [self.encode(sentence) for sentence in sentences] batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs) return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos] def score(self, sentences: List[str], **kwargs): if isinstance(sentences, str): return self.score([sentences], **kwargs)[0] # NOTE: this doesn't support translation tasks currently tokenized_sentences = [self.encode(sentence) for sentence in sentences] return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)] def generate( self, tokenized_sentences: List[torch.LongTensor], beam: int = 5, verbose: bool = False, skip_invalid_size_inputs=False, inference_step_args=None, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1: return self.generate( tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs )[0] # build generator using current args as well as any kwargs gen_args = copy.copy(self.args) gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator(self.models, gen_args) inference_step_args = inference_step_args or {} results = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): batch = utils.apply_to_sample(lambda t: t.to(self.device), batch) translations = self.task.inference_step( generator, self.models, batch, **inference_step_args ) for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) # sort output to match input order outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] if verbose: def getarg(name, default): return getattr(gen_args, name, getattr(self.args, name, default)) for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs): src_str_with_unk = self.string(source_tokens) logger.info('S\t{}'.format(src_str_with_unk)) for hypo in target_hypotheses: hypo_str = self.decode(hypo['tokens']) logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str)) logger.info('P\t{}'.format( ' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist())) )) if hypo['alignment'] is not None and getarg('print_alignment', False): logger.info('A\t{}'.format( ' 
'.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo['alignment']]) )) return outputs def encode(self, sentence: str) -> torch.LongTensor: sentence = self.tokenize(sentence) sentence = self.apply_bpe(sentence) return self.binarize(sentence) def decode(self, tokens: torch.LongTensor) -> str: sentence = self.string(tokens) sentence = self.remove_bpe(sentence) return self.detokenize(sentence) def tokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.encode(sentence) return sentence def detokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.decode(sentence) return sentence def apply_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.encode(sentence) return sentence def remove_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.decode(sentence) return sentence def binarize(self, sentence: str) -> torch.LongTensor: return self.src_dict.encode_line(sentence, add_if_not_exist=False).long() def string(self, tokens: torch.LongTensor) -> str: return self.tgt_dict.string(tokens) def _build_batches( self, tokens: List[List[int]], skip_invalid_size_inputs: bool ) -> Iterator[Dict[str, Any]]: lengths = torch.LongTensor([t.numel() for t in tokens]) batch_iterator = self.task.get_batch_iterator( dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, ).next_epoch_itr(shuffle=False) return batch_iterator class BPEHubInterface(object): """PyTorch Hub interface for Byte-Pair Encoding (BPE).""" def __init__(self, bpe, **kwargs): super().__init__() args = argparse.Namespace(bpe=bpe, **kwargs) self.bpe = encoders.build_bpe(args) assert self.bpe is not None def encode(self, sentence: str) -> str: return self.bpe.encode(sentence) def decode(self, sentence: str) -> str: return self.bpe.decode(sentence) class TokenizerHubInterface(object): """PyTorch Hub interface for tokenization.""" def __init__(self, tokenizer, **kwargs): super().__init__() args = argparse.Namespace(tokenizer=tokenizer, **kwargs) self.tokenizer = encoders.build_tokenizer(args) assert self.tokenizer is not None def encode(self, sentence: str) -> str: return self.tokenizer.encode(sentence) def decode(self, sentence: str) -> str: return self.tokenizer.decode(sentence)
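# ---------------------------------------------------------------------------
# Editor's note: the snippet below is an illustrative usage sketch, not part
# of the original fairseq module. It assumes a local directory (the
# hypothetical "/path/to/model_dir") containing a trained translation
# checkpoint "model.pt" plus the matching data and BPE files; the tokenizer
# and bpe choices are examples, not requirements.
if __name__ == "__main__":
    bundle = from_pretrained(
        "/path/to/model_dir",        # hypothetical archive with model.pt inside
        checkpoint_file="model.pt",
        data_name_or_path=".",
        tokenizer="moses",
        bpe="subword_nmt",
    )
    hub = GeneratorHubInterface(bundle["args"], bundle["task"], bundle["models"])
    hub.eval()  # nn.Module: switch to inference mode
    print(hub.translate("Hello world!", beam=5))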
10099
36.269373
114
py
RegularizedBN
RegularizedBN-main/fairseq/sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import sys from fairseq import utils class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos}) @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample['net_input'] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample['target'] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get('attn', None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample['target'] = tgt curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt) probs[idx:end] = tgt_probs.view(-1) idx = end sample['target'] = orig_target probs = probs.view(sample['target'].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None and torch.is_tensor(attn): attn = attn.data if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \ if sample['target'] is not None else None tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample['net_input']['src_tokens'][i], sample['target'][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append([{ 'tokens': ref, 'score': score_i, 'attention': avg_attn_i, 'alignment': alignment, 'positional_scores': avg_probs_i, }]) return hypos
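# ---------------------------------------------------------------------------
# Editor's note: the snippet below is an illustrative sketch, not part of the
# original fairseq module. It reproduces the probability-gathering step used
# in `generate` with plain tensors: for every target position the probability
# of the gold token is picked out of the vocabulary distribution, and (for a
# single model) the mean log-probability over the target length is the
# per-sentence score stored in `hypos[i][0]['score']`. Shapes and values are
# made up for the demo.
if __name__ == "__main__":
    bsz, tsz, vocab = 2, 3, 5
    probs = torch.full((bsz, tsz, vocab), 0.2)                # uniform toy distribution
    target = torch.randint(vocab, (bsz, tsz))                 # fake gold tokens
    gold_probs = probs.gather(dim=2, index=target.unsqueeze(-1)).squeeze(-1)
    print(gold_probs.log().mean(dim=1))                       # tensor([-1.6094, -1.6094])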
5071
36.850746
107
py
RegularizedBN
RegularizedBN-main/fairseq/binarizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from collections import Counter from fairseq.tokenizer import tokenize_line import torch from fairseq.file_io import PathManager def safe_readline(f): pos = f.tell() while True: try: return f.readline() except UnicodeDecodeError: pos -= 1 f.seek(pos) # search where this character begins class Binarizer: @staticmethod def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ): nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f: f.seek(offset) # next(f) breaks f.tell(), hence readline() must be used line = safe_readline(f) while line: if end > 0 and f.tell() > end: break if already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if reverse_order: id_list.reverse() if append_eos: id_list.append(dict.eos()) ids = torch.IntTensor(id_list) else: ids = dict.encode_line( line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 ntok += len(ids) consumer(ids) line = f.readline() return { "nseq": nseq, "nunk": sum(replaced.values()), "ntok": ntok, "replaced": replaced, } @staticmethod def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1): nseq = 0 with open(PathManager.get_local_path(filename), "r") as f: f.seek(offset) line = safe_readline(f) while line: if end > 0 and f.tell() > end: break ids = alignment_parser(line) nseq += 1 consumer(ids) line = f.readline() return {"nseq": nseq} @staticmethod def find_offsets(filename, num_chunks): with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f: size = os.fstat(f.fileno()).st_size chunk_size = size // num_chunks offsets = [0 for _ in range(num_chunks + 1)] for i in range(1, num_chunks): f.seek(chunk_size * i) safe_readline(f) offsets[i] = f.tell() return offsets
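# ---------------------------------------------------------------------------
# Editor's note: the snippet below is an illustrative usage sketch, not part
# of the original fairseq module. It shows how `find_offsets` carves a raw
# text file into byte ranges that `binarize` can then process in parallel;
# the file content is made up for the demo.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        for i in range(100):
            tmp.write("sentence number {}\n".format(i))
        path = tmp.name
    # One offset per chunk boundary plus a trailing entry that stays 0, which
    # `binarize` interprets as "read until end of file".
    print(Binarizer.find_offsets(path, num_chunks=4))
    os.remove(path)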
3354
30.952381
84
py
RegularizedBN
RegularizedBN-main/fairseq/distributed_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import pickle import random import socket import struct import subprocess import warnings from collections import OrderedDict from typing import Any, Dict, Mapping import torch import torch.distributed as dist from fairseq import utils logger = logging.getLogger(__name__) def is_master(args): return args.distributed_rank == 0 def infer_init_method(args, force_distributed=False): if args.distributed_init_method is not None or getattr(args, 'tpu', False): return # support torch.distributed.launch if all(key in os.environ for key in [ 'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK' ]): args.distributed_init_method = 'env://' args.distributed_world_size = int(os.environ['WORLD_SIZE']) args.distributed_rank = int(os.environ['RANK']) # processes are created by torch.distributed.launch args.distributed_no_spawn = True # we can determine the init method automatically for Slurm elif args.distributed_port > 0: node_list = os.environ.get('SLURM_STEP_NODELIST') if node_list is None: node_list = os.environ.get('SLURM_JOB_NODELIST') if node_list is not None: try: hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list]) args.distributed_init_method = 'tcp://{host}:{port}'.format( host=hostnames.split()[0].decode('utf-8'), port=args.distributed_port, ) nnodes = int(os.environ.get('SLURM_NNODES')) ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE') if ntasks_per_node is not None: ntasks_per_node = int(ntasks_per_node) else: ntasks = int(os.environ.get('SLURM_NTASKS')) nnodes = int(os.environ.get('SLURM_NNODES')) assert ntasks % nnodes == 0 ntasks_per_node = int(ntasks / nnodes) if ntasks_per_node == 1: assert args.distributed_world_size % nnodes == 0 gpus_per_node = args.distributed_world_size // nnodes node_id = int(os.environ.get('SLURM_NODEID')) args.distributed_rank = node_id * gpus_per_node else: assert ntasks_per_node == args.distributed_world_size // nnodes args.distributed_no_spawn = True args.distributed_rank = int(os.environ.get('SLURM_PROCID')) args.device_id = int(os.environ.get('SLURM_LOCALID')) except subprocess.CalledProcessError as e: # scontrol failed raise e except FileNotFoundError: # Slurm is not installed pass elif args.distributed_world_size > 1 or force_distributed: # fallback for single node with multiple GPUs assert args.distributed_world_size <= torch.cuda.device_count() port = random.randint(10000, 20000) args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port) def distributed_init(args): if not getattr(args, 'tpu', False): if torch.distributed.is_initialized(): warnings.warn('Distributed is already initialized, cannot initialize twice!') else: logger.info('distributed init (rank {}): {}'.format( args.distributed_rank, args.distributed_init_method, )) dist.init_process_group( backend=args.distributed_backend, init_method=args.distributed_init_method, world_size=args.distributed_world_size, rank=args.distributed_rank, ) logger.info('initialized host {} as rank {}'.format( socket.gethostname(), args.distributed_rank, )) # perform a dummy all-reduce to initialize the NCCL communicator if torch.cuda.is_available(): dist.all_reduce(torch.zeros(1).cuda()) args.distributed_rank = torch.distributed.get_rank() else: import torch_xla.core.xla_model as xm assert xm.xrt_world_size() == args.distributed_world_size args.device_id = 
xm.get_local_ordinal() args.distributed_rank = xm.get_ordinal() xm.rendezvous('distributed_init') # wait for all workers xm.mark_step() if is_master(args): logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if args.model_parallel_size > 1: try: from fairseq.model_parallel.megatron.mpu import ( get_model_parallel_rank, initialize_model_parallel, model_parallel_cuda_manual_seed, ) except ImportError: raise ImportError( '\n\nPlease install the megatron submodule:' '\n\n git submodule update --init ' 'fairseq/model_parallel/megatron' ) initialize_model_parallel(args.model_parallel_size) model_parallel_cuda_manual_seed(args.seed) model_part_number = get_model_parallel_rank() args.checkpoint_suffix += '-model_part-{0}'.format(model_part_number) return args.distributed_rank def distributed_main(i, main, args, kwargs): args.device_id = i if torch.cuda.is_available() and not args.cpu and not getattr(args, "tpu", False): torch.cuda.set_device(args.device_id) if args.distributed_rank is None: # torch.multiprocessing.spawn args.distributed_rank = kwargs.pop('start_rank', 0) + i args.distributed_rank = distributed_init(args) after_distributed_init_fn = kwargs.pop('after_distributed_init_fn', None) if after_distributed_init_fn: args = after_distributed_init_fn(args) main(args, **kwargs) def call_main(args, main, **kwargs): if args.distributed_init_method is None: infer_init_method(args) if args.distributed_init_method is not None: # distributed training if not args.distributed_no_spawn: start_rank = args.distributed_rank args.distributed_rank = None # assign automatically kwargs['start_rank'] = start_rank torch.multiprocessing.spawn( fn=distributed_main, args=(main, args, kwargs), nprocs=min( torch.cuda.device_count(), args.distributed_world_size, ), ) else: distributed_main(args.device_id, main, args, kwargs) elif getattr(args, "tpu", False): import torch_xla.distributed.xla_multiprocessing as xmp torch.multiprocessing.set_sharing_strategy("file_system") xmp.spawn( fn=distributed_main, args=(main, args, kwargs), nprocs=8, # use all 8 TPU cores ) else: # single GPU main main(args, **kwargs) def get_rank(): return dist.get_rank() def get_world_size(): return dist.get_world_size() def get_default_group(): return dist.group.WORLD def all_reduce(tensor, group=None): if isinstance(group, tuple) and group[0] == 'tpu': import torch_xla.core.xla_model as xm return xm.all_reduce('sum', [tensor], groups=group[1]) else: if group is None: group = get_default_group() return dist.all_reduce(tensor, group=group) def all_gather_list(data, group=None, max_size=16384): """Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable. 
Args: data (Any): data from the local worker to be gathered on other workers group (optional): group of the collective max_size (int, optional): maximum size of the data to be gathered across workers """ rank = get_rank() world_size = get_world_size() buffer_size = max_size * world_size if not hasattr(all_gather_list, '_buffer') or \ all_gather_list._buffer.numel() < buffer_size: all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size) all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory() buffer = all_gather_list._buffer buffer.zero_() cpu_buffer = all_gather_list._cpu_buffer data = utils.move_to_cpu(data) enc = pickle.dumps(data) enc_size = len(enc) header_size = 4 # size of header that contains the length of the encoded data size = header_size + enc_size if size > max_size: raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size)) header = struct.pack(">I", enc_size) cpu_buffer[:size] = torch.ByteTensor(list(header + enc)) start = rank * max_size buffer[start:start + size].copy_(cpu_buffer[:size]) all_reduce(buffer, group=group) buffer = buffer.cpu() try: result = [] for i in range(world_size): out_buffer = buffer[i * max_size:(i + 1) * max_size] enc_size, = struct.unpack(">I", bytes(out_buffer[:header_size].tolist())) if enc_size > 0: result.append(pickle.loads(bytes(out_buffer[header_size:header_size + enc_size].tolist()))) return result except pickle.UnpicklingError: raise Exception( 'Unable to unpickle data from other workers. all_gather_list requires all ' 'workers to enter the function together, so this error usually indicates ' 'that the workers have fallen out of sync somehow. Workers can fall out of ' 'sync if one of them runs out of memory, or if there are other conditions ' 'in your training script that can cause one worker to finish an epoch ' 'while other workers are still iterating over their portions of the data. ' 'Try rerunning with --ddp-backend=no_c10d and see if that helps.' ) def all_reduce_dict( data: Mapping[str, Any], device, group=None, ) -> Dict[str, Any]: """ AllReduce a dictionary of values across workers. We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group (optional): group of the collective """ data_keys = list(data.keys()) # We want to separately reduce items that are already on the # device and items on CPU for performance reasons. cpu_data = OrderedDict() device_data = OrderedDict() for k in data_keys: t = data[k] if not torch.is_tensor(t): cpu_data[k] = torch.tensor(t, dtype=torch.double) elif t.device.type != device.type: cpu_data[k] = t.to(dtype=torch.double) else: device_data[k] = t.to(dtype=torch.double) def _all_reduce_dict(data: OrderedDict): if len(data) == 0: return data buf = torch.stack(list(data.values())).to(device=device) all_reduce(buf, group=group) return {k: buf[i] for i, k in enumerate(data)} cpu_data = _all_reduce_dict(cpu_data) device_data = _all_reduce_dict(device_data) def get_from_stack(key): if key in cpu_data: return cpu_data[key] elif key in device_data: return device_data[key] raise KeyError return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
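# ---------------------------------------------------------------------------
# Editor's note: the snippet below is an illustrative sketch, not part of the
# original fairseq module. It demonstrates, without any process group, the
# length-prefixed pickling scheme that `all_gather_list` uses above: a 4-byte
# big-endian size header followed by the pickled payload lets every worker
# recover variable-sized objects from a fixed-size buffer. The payload is
# made up for the demo.
if __name__ == "__main__":
    payload = {"rank": 0, "loss": 1.25}
    enc = pickle.dumps(payload)
    header = struct.pack(">I", len(enc))
    buf = header + enc
    enc_size, = struct.unpack(">I", buf[:4])
    print(pickle.loads(buf[4:4 + enc_size]))   # {'rank': 0, 'loss': 1.25}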
11970
36.06192
107
py
RegularizedBN
RegularizedBN-main/fairseq/sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import search, utils from fairseq.data import data_utils from fairseq.models import FairseqIncrementalDecoder from fairseq.models.fairseq_encoder import EncoderOut from torch import Tensor class SequenceGenerator(nn.Module): def __init__( self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None, symbols_to_strip_from_output=None, ): """Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) """ super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.tgt_dict = tgt_dict self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() if eos is None else eos self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos}) self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size assert temperature > 0, "--temperature must be greater than 0" self.search = ( search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy ) # We only need to set src_lengths in LengthConstrainedBeamSearch. # As a module attribute, setting it would break in multithread # settings when the model is shared. self.should_set_src_lengths = hasattr(self.search, 'needs_src_lengths') and self.search.needs_src_lengths self.model.eval() def cuda(self): self.model.cuda() return self @torch.no_grad() def forward( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, bos_token: Optional[int] = None, ): """Generate a batch of translations. 
Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, prefix_tokens, bos_token=bos_token) # TODO(myleott): unused, deprecate after pytorch-translate migration def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): """Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations """ for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if "net_input" not in s: continue input = s["net_input"] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != "prev_output_tokens" } if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if timer is not None: timer.stop(sum(len(h[0]["tokens"]) for h in hypos)) for i, id in enumerate(s["id"].data): # remove padding src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad) ref = ( utils.strip_pad(s["target"].data[i, :], self.pad) if s["target"] is not None else None ) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs): """Generate translations. Match the api of other fairseq generators. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens constraints (torch.LongTensor, optional): force decoder to include the list of constraints bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, **kwargs) def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, constraints: Optional[Tensor] = None, bos_token: Optional[int] = None, ): incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(self.model.models_size) ], ) net_input = sample["net_input"] if 'src_tokens' in net_input: #true src_tokens = net_input['src_tokens'] # length of the source text being the character length except EndOfSentence and pad src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) elif 'source' in net_input: src_tokens = net_input['source'] src_lengths = ( net_input['padding_mask'].size(-1) - net_input['padding_mask'].sum(-1) if net_input['padding_mask'] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) else: raise Exception('expected src_tokens or source in net input') # bsz: total number of sentences in beam # Note that src_tokens may have more than 2 dimenions (i.e. 
audio features) bsz, src_len = src_tokens.size()[:2] beam_size = self.beam_size if constraints is not None and not self.search.supports_constraints: #False raise NotImplementedError("Target-side constraints were provided, but search method doesn't support them") # Initialize constraints, when active self.search.init_constraints(constraints, beam_size) #do nothing max_len: int = -1 if self.match_source_len: #false max_len = src_lengths.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker self.model.max_decoder_positions() - 1, ) assert ( self.min_len <= max_len ), "min_len cannot be larger than max_len, please adjust these!" # compute the encoder output for each beam encoder_outs = self.model.forward_encoder(net_input) # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order) # ensure encoder_outs is a List. assert encoder_outs is not None # initialize buffers scores = ( torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float() ) # +1 for eos; pad is never chosen for scoring tokens = ( torch.zeros(bsz * beam_size, max_len + 2) .to(src_tokens) .long() .fill_(self.pad) ) # +2 for eos and pad tokens[:, 0] = self.eos if bos_token is None else bos_token attn: Optional[Tensor] = None # A list that indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = ( torch.zeros(bsz, beam_size).to(src_tokens).eq(-1) ) # forward and backward-compatible False mask # list of completed sentences finalized = torch.jit.annotate( List[List[Dict[str, Tensor]]], [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)], ) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step finished = [ False for i in range(bsz) ] # a boolean array indicating if the sentence at the index is finished or not num_remaining_sent = bsz # number of sentences remaining # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, cand_size).type_as(tokens) reorder_state: Optional[Tensor] = None batch_idxs: Optional[Tensor] = None for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams # print(f'step: {step}') if reorder_state is not None: #false if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as( batch_idxs ) reorder_state.view(-1, beam_size).add_( corr.unsqueeze(-1) * beam_size ) self.model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = self.model.reorder_encoder_out( encoder_outs, reorder_state ) lprobs, avg_attn_scores = self.model.forward_decoder( tokens[:, : step + 1], encoder_outs, incremental_states, self.temperature, ) lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # handle 
max length constraint if step >= max_len: lprobs[:, : self.eos] = -math.inf lprobs[:, self.eos + 1 :] = -math.inf # handle prefix tokens (possibly with different lengths) if ( prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len ): lprobs, tokens, scores = self._prefix_tokens( step, lprobs, scores, tokens, prefix_tokens, beam_size ) elif step < self.min_len: # minimum length constraint (does not apply if using prefix_tokens) lprobs[:, self.eos] = -math.inf # Record attention scores, only support avg_attn_scores is a Tensor if avg_attn_scores is not None: if attn is None: attn = torch.empty( bsz * beam_size, avg_attn_scores.size(1), max_len + 2 ).to(scores) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) eos_bbsz_idx = torch.empty(0).to( tokens ) # indices of hypothesis ending with eos (finished sentences) eos_scores = torch.empty(0).to( scores ) # scores of hypothesis ending with eos (finished sentences) if self.should_set_src_lengths: self.search.set_src_lengths(src_lengths) if self.no_repeat_ngram_size > 0: lprobs = self._no_repeat_ngram(tokens, lprobs, bsz, beam_size, step) # Shape: (batch, cand_size) cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos # Shape of eos_mask: (batch size, beam size) eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf) eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask) # only consider eos when it's among the top beam_size indices # Now we know what beam item(s) to finish # Shape: 1d list of absolute-numbered eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents: List[int] = [] if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = self.finalize_hypos( step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, attn, src_lengths, max_len, ) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break assert step < max_len # Remove finalized sentences (ones for which {beam_size} # finished hypotheses have been generated) from the batch. 
if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = torch.ones(bsz, dtype=torch.bool, device=cand_indices.device) batch_mask[finalized_sents] = False # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it batch_idxs = torch.arange(bsz, device=cand_indices.device).masked_select(batch_mask) # Choose the subset of the hypothesized constraints that will continue self.search.prune_sentences(batch_idxs) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths = src_lengths[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view( new_bsz * beam_size, attn.size(1), -1 ) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos # Rewrite the operator since the element wise or is not supported in torchscript. eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size])) active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just # the hypos with the smallest values in active_mask. # {active_hypos} indicates which {beam_size} hypotheses # from the list of {2 * beam_size} candidates were # selected. Shapes: (batch size, beam size) new_cands_to_ignore, active_hypos = torch.topk( active_mask, k=beam_size, dim=1, largest=False ) # update cands_to_ignore to ignore any finalized hypos. cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] # Make sure there is at least one active item for each sentence in the batch. assert (~cands_to_ignore).any(dim=1).all() # update cands_to_ignore to ignore any finalized hypos # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam # can be selected more than once). 
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos) active_scores = torch.gather(cand_scores, dim=1, index=active_hypos) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses # Set the tokens for each beam (can select the same row more than once) tokens[:, : step + 1] = torch.index_select( tokens[:, : step + 1], dim=0, index=active_bbsz_idx ) # Select the next token for each of them tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather( cand_indices, dim=1, index=active_hypos ) if step > 0: scores[:, :step] = torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx ) scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather( cand_scores, dim=1, index=active_hypos ) # Update constraints based on which candidates were selected for the next beam self.search.update_constraints(active_hypos) # copy attention for active hypotheses if attn is not None: attn[:, :, : step + 2] = torch.index_select( attn[:, :, : step + 2], dim=0, index=active_bbsz_idx ) # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): # make into beam container BCList = [ BeamContainer(elem["score"].item(), elem) for elem in finalized[sent] ] BCList.sort() BCList.reverse() finalized[sent] = torch.jit.annotate( List[Dict[str, Tensor]], [x.elem for x in BCList] ) return finalized def _prefix_tokens( self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int ): """Handle prefix tokens""" prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1)) prefix_mask = prefix_toks.ne(self.pad) lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs) lprobs[prefix_mask] = lprobs[prefix_mask].scatter( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask] ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[ :, 0, 1 : step + 1 ] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() # copy tokens, scores and lprobs from the first beam to all beams tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size) scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size) lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size) return lprobs, tokens, scores def replicate_first_beam(self, tensor, mask, beam_size: int): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) def finalize_hypos( self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[str, Tensor]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int, ): """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly. A sentence is finalized when {beam_size} finished items have been collected for it. Returns number of sentences (not beam items) being finalized. These will be removed from the batch and not processed further. Args: bbsz_idx (Tensor): """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors. 
# tokens is (batch * beam, max_len). So the index_select # gets the newly EOS rows, then selects cols 1..{step + 2} tokens_clone = tokens.index_select(0, bbsz_idx)[ :, 1 : step + 2 ] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = ( attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2] if attn is not None else None ) # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty # cum_unfin records which sentences in the batch are finished. # It helps match indexing between (a) the original sentences # in the batch and (b) the current, possibly-reduced set of # sentences. cum_unfin: List[int] = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) # set() is not supported in script export # The keys here are of the form "{sent}_{unfin_idx}", where # "unfin_idx" is the index in the current (possibly reduced) # list of sentences, and "sent" is the index in the original, # unreduced batch sents_seen: Dict[str, Optional[Tensor]] = {} # For every finished beam item for i in range(bbsz_idx.size()[0]): idx = bbsz_idx[i] score = eos_scores[i] # sentence index in the current (possibly reduced) batch unfin_idx = idx // beam_size # sentence index in the original (unreduced) batch sent = unfin_idx + cum_unfin[unfin_idx] # print(f"{step} FINISHED {idx} {score} {sent}={unfin_idx} {cum_unfin}") # Cannot create dict for key type '(int, int)' in torchscript. # The workaround is to cast int to string seen = str(sent.item()) + "_" + str(unfin_idx.item()) if seen not in sents_seen: sents_seen[seen] = None if self.match_source_len and step > src_lengths[unfin_idx]: score = torch.tensor(-math.inf).to(score) # An input sentence (among those in a batch) is finished when # beam_size hypotheses have been collected for it if len(finalized[sent]) < beam_size: if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i] else: hypo_attn = torch.empty(0) finalized[sent].append( { "tokens": tokens_clone[i], "score": score, "attention": hypo_attn, # src_len x tgt_len "alignment": torch.empty(0), "positional_scores": pos_scores[i], } ) newly_finished: List[int] = [] for seen in sents_seen.keys(): # check termination conditions for this sentence sent: int = int(float(seen.split("_")[0])) unfin_idx: int = int(float(seen.split("_")[1])) if not finished[sent] and self.is_finished( step, unfin_idx, max_len, len(finalized[sent]), beam_size ): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def is_finished( self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int, ): """ Check whether decoding for a sentence is finished, which occurs when the list of finalized sentences has reached the beam size, or when we reach the maximum length. 
""" assert finalized_sent_len <= beam_size if finalized_sent_len == beam_size or step == max_len: return True return False def calculate_banned_tokens( self, tokens, step: int, gen_ngrams: List[Dict[str, List[int]]], no_repeat_ngram_size: int, bbsz_idx: int, ): tokens_list: List[int] = tokens[ bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1 ].tolist() # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = ",".join([str(x) for x in tokens_list]) return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], [])) def transpose_list(self, l: List[List[int]]): # GeneratorExp aren't supported in TS so ignoring the lint min_len = min([len(x) for x in l]) # noqa l2 = [[row[i] for row in l] for i in range(min_len)] return l2 def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): # for each beam and batch sentence, generate a list of previous ngrams gen_ngrams: List[Dict[str, List[int]]] = [ torch.jit.annotate(Dict[str, List[int]], {}) for bbsz_idx in range(bsz * beam_size) ] cpu_tokens = tokens.cpu() for bbsz_idx in range(bsz * beam_size): gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist() for ngram in self.transpose_list( [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)] ): key = ",".join([str(x) for x in ngram[:-1]]) gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get( key, torch.jit.annotate(List[int], []) ) + [ngram[-1]] if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [ self.calculate_banned_tokens( tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx ) for bbsz_idx in range(bsz * beam_size) ] else: banned_tokens = [ torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) ] for bbsz_idx in range(bsz * beam_size): lprobs[bbsz_idx][ torch.tensor(banned_tokens[bbsz_idx]).long() ] = torch.tensor(-math.inf).to(lprobs) return lprobs class EnsembleModel(nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models_size = len(models) # method '__len__' is not supported in ModuleList for torch script self.single_model = models[0] self.models = nn.ModuleList(models) self.has_incremental: bool = False if all( hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models ): self.has_incremental = True def forward(self): pass def has_encoder(self): return hasattr(self.single_model, "encoder") def has_incremental_states(self): return self.has_incremental def max_decoder_positions(self): return min([m.max_decoder_positions() for m in self.models]) @torch.jit.export def forward_encoder(self, net_input: Dict[str, Tensor]): if not self.has_encoder(): return None return [ model.encoder.forward_torchscript(net_input) for model in self.models ] @torch.jit.export def forward_decoder( self, tokens, encoder_outs: List[EncoderOut], incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], temperature: float = 1.0, ): log_probs = [] avg_attn: Optional[Tensor] = None encoder_out: Optional[EncoderOut] = None for i, model in enumerate(self.models): if self.has_encoder(): encoder_out = encoder_outs[i] # decode each model if self.has_incremental_states(): decoder_out = model.decoder.forward( tokens, encoder_out=encoder_out, incremental_state=incremental_states[i], ) else: decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out) attn: Optional[Tensor] = None decoder_len = len(decoder_out) if decoder_len > 1 
and decoder_out[1] is not None: if isinstance(decoder_out[1], Tensor): attn = decoder_out[1] else: attn_holder = decoder_out[1]["attn"] if isinstance(attn_holder, Tensor): attn = attn_holder elif attn_holder is not None: attn = attn_holder[0] if attn is not None: attn = attn[:, -1, :] decoder_out_tuple = ( decoder_out[0][:, -1:, :].div_(temperature), None if decoder_len <= 1 else decoder_out[1], ) probs = model.get_normalized_probs( decoder_out_tuple, log_probs=True, sample=None ) probs = probs[:, -1, :] if self.models_size == 1: return probs, attn log_probs.append(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log( self.models_size ) if avg_attn is not None: avg_attn.div_(self.models_size) return avg_probs, avg_attn @torch.jit.export def reorder_encoder_out(self, encoder_outs: Optional[List[EncoderOut]], new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_outs: List[EncoderOut] = [] if not self.has_encoder(): return new_outs for i, model in enumerate(self.models): assert encoder_outs is not None new_outs.append( model.encoder.reorder_encoder_out(encoder_outs[i], new_order) ) return new_outs @torch.jit.export def reorder_incremental_state( self, incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): if not self.has_incremental_states(): return for i, model in enumerate(self.models): model.decoder.reorder_incremental_state_scripting( incremental_states[i], new_order ) class SequenceGeneratorWithAlignment(SequenceGenerator): def __init__(self, models, tgt_dict, left_pad_target=False, **kwargs): """Generates translations of a given source sentence. Produces alignments following "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: left_pad_target (bool, optional): Whether or not the hypothesis should be left padded or not when they are teacher forced for generating alignments. """ super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs) self.left_pad_target = left_pad_target @torch.no_grad() def generate(self, models, sample, **kwargs): finalized = super()._generate(sample, **kwargs) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] beam_size = self.beam_size src_tokens, src_lengths, prev_output_tokens, tgt_tokens = self._prepare_batch_for_alignment( sample, finalized ) if any(getattr(m, "full_context_alignment", False) for m in self.model.models): attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens) else: attn = [ finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0) for i in range(bsz * beam_size) ] if src_tokens.device != "cpu": src_tokens = src_tokens.to('cpu') tgt_tokens = tgt_tokens.to('cpu') attn = [i.to('cpu') for i in attn] # Process the attn matrix to extract hard alignments. 
for i in range(bsz * beam_size): alignment = utils.extract_hard_alignment( attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos ) finalized[i // beam_size][i % beam_size]["alignment"] = alignment return finalized def _prepare_batch_for_alignment(self, sample, hypothesis): src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] src_tokens = ( src_tokens[:, None, :] .expand(-1, self.beam_size, -1) .contiguous() .view(bsz * self.beam_size, -1) ) src_lengths = sample["net_input"]["src_lengths"] src_lengths = ( src_lengths[:, None] .expand(-1, self.beam_size) .contiguous() .view(bsz * self.beam_size) ) prev_output_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True, ) tgt_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False, ) return src_tokens, src_lengths, prev_output_tokens, tgt_tokens class EnsembleModelWithAlignment(EnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) def forward_align(self, src_tokens, src_lengths, prev_output_tokens): avg_attn = None for model in self.models: decoder_out = model(src_tokens, src_lengths, prev_output_tokens) attn = decoder_out[1]["attn"] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(self.models) > 1: avg_attn.div_(len(self.models)) return avg_attn @torch.jit.script class BeamContainer(object): def __init__(self, score: float, elem: Dict[str, Tensor]): self.score = score self.elem = elem def __lt__(self, other): # type: (BeamContainer) -> bool # Due to https://github.com/pytorch/pytorch/issues/20388, # this has to use old style type annotations # Match original behavior of sorted function when two scores are equal. return self.score <= other.score
39,633
39.31943
118
py
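The _no_repeat_ngram / calculate_banned_tokens pair in the SequenceGenerator code above implements n-gram blocking: every generated n-gram is indexed by its (n-1)-token prefix, and any token that would complete one of those n-grams again is masked to -inf. Below is a minimal standalone sketch of that idea for a single hypothesis; it works on a plain Python list instead of the (bsz * beam_size) token tensor, and the function and variable names are illustrative, not taken from the file.

# Minimal standalone sketch of the n-gram blocking used by _no_repeat_ngram above.
# It handles one hypothesis as a Python list of token ids; the real code does this
# per (batch x beam) row on tensors and writes -inf into lprobs for the banned ids.
from typing import Dict, List, Tuple


def banned_next_tokens(prev_tokens: List[int], no_repeat_ngram_size: int) -> List[int]:
    """Token ids that would recreate an n-gram already present in prev_tokens."""
    n = no_repeat_ngram_size
    if n <= 0 or len(prev_tokens) < n:
        return []
    # index every generated n-gram by its (n-1)-token prefix
    gen_ngrams: Dict[Tuple[int, ...], List[int]] = {}
    for i in range(len(prev_tokens) - n + 1):
        ngram = tuple(prev_tokens[i:i + n])
        gen_ngrams.setdefault(ngram[:-1], []).append(ngram[-1])
    # look up the prefix formed by the last (n-1) generated tokens
    prefix = tuple(prev_tokens[len(prev_tokens) - n + 1:])
    return gen_ngrams.get(prefix, [])


if __name__ == "__main__":
    hypo = [5, 7, 9, 5, 7]  # "5 7" was already followed by 9
    print(banned_next_tokens(hypo, no_repeat_ngram_size=3))  # -> [9]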
RegularizedBN
RegularizedBN-main/fairseq/legacy_distributed_data_parallel.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A modified version of the legacy DistributedDataParallel module that uses c10d communication primitives. This version is simpler than the latest PyTorch version and is useful for debugging. Notably it does not overlap gradient communication with the backward pass, which makes it slower but more robust than the PyTorch version. This version also supports the *no_sync* context manager, which allows faster training with `--update-freq`. """ from collections import OrderedDict from contextlib import contextmanager import copy import torch from torch import nn from torch.autograd import Variable from . import distributed_utils class LegacyDistributedDataParallel(nn.Module): """Implements distributed data parallelism at the module level. A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`. This version uses a c10d process group for communication and does not broadcast buffers. Args: module (~torch.nn.Module): module to be parallelized world_size (int): number of parallel workers process_group (optional): the c10d process group to be used for distributed data all-reduction. If None, the default process group will be used. buffer_size (int, optional): number of elements to buffer before performing all-reduce (default: 256M). """ def __init__(self, module, world_size, process_group=None, buffer_size=2**28): super().__init__() self.module = module self.world_size = world_size self.process_group = process_group # Never use a bigger buffer than the number of model params self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters())) self.buffer = None # We can also forcibly accumulate grads locally and only do the # all-reduce at some later time self.accumulate_grads = False # make per-device lists of parameters paramlists = OrderedDict() for param in self.module.parameters(): device = param.device if paramlists.get(device) is None: paramlists[device] = [] paramlists[device] += [param] self.per_device_params = list(paramlists.values()) def __getstate__(self): attrs = copy.copy(self.__dict__) return attrs def __setstate__(self, state): super().__setstate__(state) @contextmanager def no_sync(self): """A context manager to disable gradient synchronization.""" old_accumulate_grads = self.accumulate_grads self.accumulate_grads = True yield self.accumulate_grads = old_accumulate_grads def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce(self): """ This function must be called explicitly after backward to reduce gradients. There is no automatic hook like c10d. 
""" def all_reduce_params(params): buffer = self.buffer nonzero_buffer = False if len(params) > 1: offset = 0 for p in params: sz = p.numel() if p.grad is not None: buffer[offset:offset+sz].copy_(p.grad.data.view(-1)) nonzero_buffer = True else: buffer[offset:offset+sz].zero_() offset += sz else: # we only have a single grad to all-reduce p = params[0] if p.grad is not None: buffer = p.grad.data nonzero_buffer = True elif p.numel() <= self.buffer.numel(): buffer = buffer[:p.numel()] buffer.zero_() else: buffer = torch.zeros_like(p) if nonzero_buffer: buffer.div_(self.world_size) distributed_utils.all_reduce(buffer, self.process_group) # copy all-reduced grads back into their original place offset = 0 for p in params: sz = p.numel() if p.grad is not None: p.grad.data.copy_(buffer[offset:offset+sz].view_as(p)) else: p.grad = buffer[offset:offset+sz].view_as(p).clone() offset += sz def reduction_fn(): # This function only needs to be called once if self.accumulate_grads: return if self.buffer is None: self.buffer = next(self.module.parameters()).new(self.buffer_size) for params in self.per_device_params: # All-reduce the gradients in buckets offset = 0 buffered_params = [] for param in params: if not param.requires_grad: continue if param.grad is None: param.grad = torch.zeros_like(param) if param.grad.requires_grad: raise RuntimeError("DistributedDataParallel only works " "with gradients that don't require " "grad") sz = param.numel() if sz > self.buffer.numel(): # all-reduce big params directly all_reduce_params([param]) else: if offset + sz > self.buffer.numel(): all_reduce_params(buffered_params) offset = 0 buffered_params.clear() buffered_params.append(param) offset += sz if len(buffered_params) > 0: all_reduce_params(buffered_params) reduction_fn()
6,223
35.397661
88
py
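A rough usage sketch for the LegacyDistributedDataParallel class above, showing the no_sync / explicit all_reduce pattern it is designed for (faster training with --update-freq): gradients are accumulated locally for all but the last micro-batch, then the buffered all-reduce runs once before the optimizer step. The training-loop names (ddp_model, criterion, optimizer, micro_batches) are placeholders for illustration, not APIs from this repository.

# Sketch only: drives LegacyDistributedDataParallel with gradient accumulation.
def accumulated_step(ddp_model, criterion, optimizer, micro_batches):
    optimizer.zero_grad()
    # no gradient communication while accumulating the first N-1 micro-batches
    with ddp_model.no_sync():
        for batch in micro_batches[:-1]:
            loss = criterion(ddp_model(**batch["net_input"]), batch["target"])
            loss.backward()
    # final micro-batch outside no_sync, then reduce gradients across workers;
    # unlike c10d DDP there is no backward hook, so all_reduce() must be explicit
    last = micro_batches[-1]
    loss = criterion(ddp_model(**last["net_input"]), last["target"])
    loss.backward()
    ddp_model.all_reduce()
    optimizer.step()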
RegularizedBN
RegularizedBN-main/fairseq/options.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import sys from typing import Callable, List, Optional import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl def get_preprocessing_parser(default_task="translation"): parser = get_parser("Preprocessing", default_task) add_preprocess_args(parser) return parser def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) return parser def get_generation_parser(interactive=False, default_task="translation"): parser = get_parser("Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_generation_args(parser) if interactive: add_interactive_args(parser) return parser def get_interactive_generation_parser(default_task="translation"): return get_generation_parser(interactive=True, default_task=default_task) def get_eval_lm_parser(default_task="language_modeling"): parser = get_parser("Evaluate Language Model", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_eval_lm_args(parser) return parser def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") add_common_eval_args(group) return parser def csv_str_list(x): return x.split(',') def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)] def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. 
args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        return argparse.Namespace(
            **{k: v for k, v in vars(args).items() if v is not None}
        )

    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY

    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)

    if modify_parser is not None:
        modify_parser(parser)

    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args) # args.data is not available yet

    # Add model-specific args to parser.
    if hasattr(args, "arch"):
        model_specific_group = parser.add_argument_group(
            "Model-specific configuration",
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)

    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)
    if hasattr(args, "task"):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, "use_bmuf", False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)

    # Modify the parser a second time, since defaults may have been reset
    if modify_parser is not None:
        modify_parser(parser)

    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None

    # Post-process args.
    if hasattr(args, "max_sentences_valid") and args.max_sentences_valid is None:
        args.max_sentences_valid = args.max_sentences
    if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    if getattr(args, "memory_efficient_fp16", False):
        args.fp16 = True
    if getattr(args, "memory_efficient_bf16", False):
        args.bf16 = True
    args.tpu = getattr(args, "tpu", False)
    args.bf16 = getattr(args, "bf16", False)
    if args.bf16:
        args.tpu = True
    if args.tpu and args.fp16:
        raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")

    if getattr(args, "seed", None) is None:
        args.seed = 1  # default seed for training
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False

    # Apply architecture configuration.
    if hasattr(args, "arch"):
        ARCH_CONFIG_REGISTRY[args.arch](args)

    if parse_known:
        return args, extra
    else:
        return args


def get_parser(desc, default_task="translation"):
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) # fmt: off parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar') parser.add_argument('--log-interval', type=int, default=100, metavar='N', help='log progress every N batches (when progress bar is disabled)') parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm']) parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir ' 'of running tensorboard (default: no tensorboard logging)') parser.add_argument('--seed', default=None, type=int, metavar='N', help='pseudo random number generator seed') parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA') parser.add_argument('--tpu', action='store_true', help='use TPU instead of CUDA') parser.add_argument('--bf16', action='store_true', help='use bfloat16; implies --tpu') parser.add_argument('--fp16', action='store_true', help='use FP16') parser.add_argument('--memory-efficient-bf16', action='store_true', help='use a memory-efficient version of BF16 training; implies --bf16') parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16') parser.add_argument('--fp16-no-flatten-grads', action='store_true', help='don\'t flatten FP16 grads tensor') parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int, help='default FP16 loss scale') parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale') parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale') parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped') parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below') parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)') parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)') parser.add_argument('--all-gather-list-size', default=16384, type=int, help='number of bytes reserved for gathering stats from workers') parser.add_argument('--model-parallel-size', type=int, metavar='N', default=1, help='total number of GPUs to parallelize model over') parser.add_argument('--checkpoint-suffix', default='', help='suffix to add to the checkpoint file name') parser.add_argument('--quantization-config-path', default=None, help='path to quantization config file') parser.add_argument('--profile', action='store_true', help='enable autograd profiler emit_nvtx') from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( '--' + registry_name.replace('_', '-'), default=REGISTRY['default'], choices=REGISTRY['registry'].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task') # fmt: on return parser def 
add_preprocess_args(parser): group = parser.add_argument_group("Preprocessing") # fmt: off group.add_argument("-s", "--source-lang", default=None, metavar="SRC", help="source language") group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", help="target language") group.add_argument("--trainpref", metavar="FP", default=None, help="train file prefix") group.add_argument("--validpref", metavar="FP", default=None, help="comma separated, valid file prefixes") group.add_argument("--testpref", metavar="FP", default=None, help="comma separated, test file prefixes") group.add_argument("--align-suffix", metavar="FP", default=None, help="alignment file suffix") group.add_argument("--destdir", metavar="DIR", default="data-bin", help="destination dir") group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--tgtdict", metavar="FP", help="reuse given target dictionary") group.add_argument("--srcdict", metavar="FP", help="reuse given source dictionary") group.add_argument("--nwordstgt", metavar="N", default=-1, type=int, help="number of target words to retain") group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, help="number of source words to retain") group.add_argument("--alignfile", metavar="ALIGN", default=None, help="an alignment file (optional)") parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument("--joined-dictionary", action="store_true", help="Generate joined dictionary") group.add_argument("--only-source", action="store_true", help="Only process the source language") group.add_argument("--padding-factor", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N") group.add_argument("--workers", metavar="N", default=1, type=int, help="number of parallel workers") # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("Dataset and data loading") # fmt: off group.add_argument('--num-workers', default=1, type=int, metavar='N', help='how many subprocesses to use for data loading') group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true', help='ignore too long or too short lines in valid and test set') group.add_argument('--max-tokens', type=int, metavar='N', help='maximum number of tokens in a batch') group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N', help='maximum number of sentences in a batch') group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N', help='batch size will either be less than this value, ' 'or a multiple of this value') parser.add_argument('--dataset-impl', metavar='FORMAT', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument('--data-buffer-size', default=10, type=int, metavar='N', help='number of batches to preload') if train: group.add_argument('--train-subset', default='train', metavar='SPLIT', help='data subset to use for training (e.g. train, valid, test)') group.add_argument('--valid-subset', default='valid', metavar='SPLIT', help='comma separated list of data subsets to use for validation' ' (e.g. 
train, valid, test)') group.add_argument('--validate-interval', type=int, default=1, metavar='N', help='validate every N epochs') group.add_argument('--validate-interval-updates', type=int, default=0, metavar='N', help='validate every N updates') group.add_argument('--validate-after-updates', type=int, default=0, metavar='N', help='dont validate until reaching this many updates') group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N', help='specified random seed for validation') group.add_argument('--disable-validation', action='store_true', help='disable validation') group.add_argument('--max-tokens-valid', type=int, metavar='N', help='maximum number of tokens in a validation batch' ' (defaults to --max-tokens)') group.add_argument('--max-sentences-valid', type=int, metavar='N', help='maximum number of sentences in a validation batch' ' (defaults to --max-sentences)') group.add_argument('--curriculum', default=0, type=int, metavar='N', help='don\'t shuffle batches for first N epochs') if gen: group.add_argument('--gen-subset', default='test', metavar='SPLIT', help='data subset to generate (train, valid, test)') group.add_argument('--num-shards', default=1, type=int, metavar='N', help='shard generation over N shards') group.add_argument('--shard-id', default=0, type=int, metavar='ID', help='id of the shard to generate (id < num_shards)') # fmt: on return group def add_distributed_training_args(parser, default_world_size=None): group = parser.add_argument_group("Distributed training") # fmt: off if default_world_size is None: default_world_size = max(1, torch.cuda.device_count()) group.add_argument('--distributed-world-size', type=int, metavar='N', default=default_world_size, help='total number of GPUs across all nodes (default: all visible GPUs)') group.add_argument('--distributed-rank', default=0, type=int, help='rank of the current worker') group.add_argument('--distributed-backend', default='nccl', type=str, help='distributed backend') group.add_argument('--distributed-init-method', default=None, type=str, help='typically tcp://hostname:port that will be used to ' 'establish initial connetion') group.add_argument('--distributed-port', default=-1, type=int, help='port number (not required if using --distributed-init-method)') group.add_argument('--device-id', '--local_rank', default=0, type=int, help='which GPU to use (usually configured automatically)') group.add_argument('--distributed-no-spawn', action='store_true', help='do not spawn multiple processes even if multiple GPUs are visible') # "c10d" is PyTorch's DDP implementation and provides the fastest # training. "no_c10d" is a more robust, but slightly slower DDP # implementation. Try this if you get warning messages about # inconsistent gradients between workers, or if some of your model # parameters are not always used. 
group.add_argument('--ddp-backend', default='c10d', type=str, choices=['c10d', 'no_c10d'], help='DistributedDataParallel backend') group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB', help='bucket size for reduction') group.add_argument('--fix-batches-to-gpus', action='store_true', help='don\'t shuffle batches between GPUs; this reduces overall ' 'randomness and may affect precision but avoids the cost of ' 're-reading the data') group.add_argument('--find-unused-parameters', default=False, action='store_true', help='disable unused parameter detection (not applicable to ' 'no_c10d ddp-backend') group.add_argument('--fast-stat-sync', default=False, action='store_true', help='[deprecated] this is now defined per Criterion') group.add_argument('--broadcast-buffers', default=False, action='store_true', help='Copy non-trainable parameters between GPUs, such as ' 'batchnorm population statistics') group.add_argument('--distributed-wrapper', default='DDP', type=str, choices=['DDP', 'SlowMo'], help='DistributedDataParallel backend') # Add arguments for SlowMo - these will be used when SlowMo is enabled via above group.add_argument('--slowmo-momentum', default=None, type=float, help='SlowMo momentum term; by default use 0.0 for 16 GPUs, ' '0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs') group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'], help='whether to use LocalSGD or SGP') group.add_argument('--localsgd-frequency', default=3, type=int, help='Local SGD allreduce frequency') group.add_argument('--nprocs-per-node', type=int, metavar='N', default=max(1, torch.cuda.device_count()), help='number of GPUs in each node. An allreduce operation across GPUs in ' 'a node is very fast. Hence, we do allreduce across GPUs in a node, ' 'and gossip across different nodes') # fmt: on return group def add_optimization_args(parser): group = parser.add_argument_group("Optimization") # fmt: off group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N', help='force stop training at specified epoch') group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N', help='force stop training at specified update') group.add_argument('--stop-time-hours', default=0, type=float, metavar='N', help='force stop training after specified cumulative time (if >0)') group.add_argument('--clip-norm', default=0.0, type=float, metavar='NORM', help='clip threshold of gradients') group.add_argument('--sentence-avg', action='store_true', help='normalize gradients by the number of sentences in a batch' ' (default is to normalize by number of tokens)') group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K', type=lambda uf: eval_str_list(uf, type=int), help='update parameters every N_i batches, when in epoch i') group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list, metavar='LR_1,LR_2,...,LR_N', help='learning rate for the first N epochs; all epochs >N using LR_N' ' (note: this may be interpreted differently depending on --lr-scheduler)') group.add_argument('--min-lr', default=-1, type=float, metavar='LR', help='stop training when the learning rate reaches this minimum') group.add_argument('--use-bmuf', default=False, action='store_true', help='specify global optimizer for syncing models on different GPUs/shards') # fmt: on return group def add_checkpoint_args(parser): group = parser.add_argument_group("Checkpointing") # fmt: off group.add_argument('--save-dir', metavar='DIR', default='checkpoints', 
help='path to save checkpoints') group.add_argument('--restore-file', default='checkpoint_last.pt', help='filename from which to load checkpoint ' '(default: <save-dir>/checkpoint_last.pt') group.add_argument('--finetune-from-model', default=None, type=str, help='finetune from a pretrained model; ' 'note that meters and lr scheduler will be reset') group.add_argument('--reset-dataloader', action='store_true', help='if set, does not reload dataloader state from the checkpoint') group.add_argument('--reset-lr-scheduler', action='store_true', help='if set, does not load lr scheduler state from the checkpoint') group.add_argument('--reset-meters', action='store_true', help='if set, does not load meters from the checkpoint') group.add_argument('--reset-optimizer', action='store_true', help='if set, does not load optimizer state from the checkpoint') group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT', help='a dictionary used to override optimizer args when loading a checkpoint') group.add_argument('--save-interval', type=int, default=1, metavar='N', help='save a checkpoint every N epochs') group.add_argument('--save-interval-updates', type=int, default=0, metavar='N', help='save a checkpoint (and validate) every N updates') group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N', help='keep the last N checkpoints saved with --save-interval-updates') group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N', help='keep last N epoch checkpoints') group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N', help='keep best N checkpoints based on scores') group.add_argument('--no-save', action='store_true', help='don\'t save models or checkpoints') group.add_argument('--no-epoch-checkpoints', action='store_true', help='only store last and best checkpoints') group.add_argument('--no-last-checkpoints', action='store_true', help='don\'t store last checkpoints') group.add_argument('--no-save-optimizer-state', action='store_true', help='don\'t save optimizer-state as part of checkpoint') group.add_argument('--best-checkpoint-metric', type=str, default='loss', help='metric to use for saving "best" checkpoints') group.add_argument('--maximize-best-checkpoint-metric', action='store_true', help='select the largest metric value for saving "best" checkpoints') group.add_argument('--patience', type=int, default=-1, metavar='N', help=('early stop training if valid performance doesn\'t ' 'improve for N consecutive validation runs; note ' 'that this is influenced by --validate-interval')) # fmt: on return group def add_common_eval_args(group): # fmt: off group.add_argument('--path', metavar='FILE', help='path(s) to model file(s), colon separated') group.add_argument('--remove-bpe', '--post-process', nargs='?', const='@@ ', default=None, help='remove BPE tokens before scoring (can be set to sentencepiece)') group.add_argument('--quiet', action='store_true', help='only print final scores') group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT', help='a dictionary used to override model args at generation ' 'that were used during model training') group.add_argument('--results-path', metavar='RESDIR', type=str, default=None, help='path to save eval results (optional)"') # fmt: on def add_eval_lm_args(parser): group = parser.add_argument_group("LM Evaluation") add_common_eval_args(group) # fmt: off group.add_argument('--output-word-probs', action='store_true', help='if set, outputs words and their 
predicted log probabilities to standard output') group.add_argument('--output-word-stats', action='store_true', help='if set, outputs word statistics such as word count, average probability, etc') group.add_argument('--context-window', default=0, type=int, metavar='N', help='ensures that every evaluated token has access to a context of at least this size,' ' if possible') group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N', help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens' ' in order to fit into GPU memory') # fmt: on def add_generation_args(parser): group = parser.add_argument_group("Generation") add_common_eval_args(group) # fmt: off group.add_argument('--beam', default=5, type=int, metavar='N', help='beam size') group.add_argument('--nbest', default=1, type=int, metavar='N', help='number of hypotheses to output') group.add_argument('--max-len-a', default=0, type=float, metavar='N', help=('generate sequences of maximum length ax + b, ' 'where x is the source length')) group.add_argument('--max-len-b', default=200, type=int, metavar='N', help=('generate sequences of maximum length ax + b, ' 'where x is the source length')) group.add_argument('--min-len', default=1, type=float, metavar='N', help=('minimum generation length')) group.add_argument('--match-source-len', default=False, action='store_true', help=('generations should match the source length')) group.add_argument('--no-early-stop', action='store_true', help='deprecated') group.add_argument('--unnormalized', action='store_true', help='compare unnormalized hypothesis scores') group.add_argument('--no-beamable-mm', action='store_true', help='don\'t use BeamableMM in attention layers') group.add_argument('--lenpen', default=1, type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--unkpen', default=0, type=float, help='unknown word penalty: <0 produces more unks, >0 produces fewer') group.add_argument('--replace-unk', nargs='?', const=True, default=None, help='perform unknown replacement (optionally with alignment dictionary)') group.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') group.add_argument('--score-reference', action='store_true', help='just score the reference translation') group.add_argument('--prefix-size', default=0, type=int, metavar='PS', help='initialize generation by target prefix of given length') group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N', help='ngram blocking such that this size ngram cannot be repeated in the generation') group.add_argument('--sampling', action='store_true', help='sample hypotheses instead of using beam search') group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS', help='sample from top K likely next words instead of all words') group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS', help='sample from the smallest set whose cumulative probability mass exceeds p for next words') group.add_argument('--constraints', const="ordered", nargs="?", choices=["ordered", "unordered"], help='enables lexically constrained decoding') group.add_argument('--temperature', default=1., type=float, metavar='N', help='temperature for generation') group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N', help='number of groups for Diverse Beam Search') group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N', help='strength of diversity penalty 
for Diverse Beam Search') group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N', help='strength of diversity penalty for Diverse Siblings Search') group.add_argument('--print-alignment', action='store_true', help='if set, uses attention feedback to compute and print alignment to source tokens') group.add_argument('--print-step', action='store_true') # arguments for iterative refinement generator group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N', help='if > 0.0, it penalized early-stopping in decoding.') group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N', help='maximum iterations for iterative refinement.') group.add_argument('--iter-decode-force-max-iter', action='store_true', help='if set, run exact the maximum number of iterations without early stop') group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N', help='if > 1, model will generate translations varying by the lengths.') group.add_argument('--iter-decode-with-external-reranker', action='store_true', help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'), group.add_argument('--retain-iter-history', action='store_true', help='if set, decoding returns the whole history of iterative refinement') group.add_argument('--retain-dropout', action='store_true', help='Use dropout at inference time') group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str, help='if set, only retain dropout for the specified modules; ' 'if not set, then dropout will be retained for all modules') # special decoding format for advanced decoding. group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs']) # fmt: on return group def add_interactive_args(parser): group = parser.add_argument_group("Interactive") # fmt: off group.add_argument('--buffer-size', default=0, type=int, metavar='N', help='read this many sentences into a buffer before processing them') group.add_argument('--input', default='-', type=str, metavar='FILE', help='file to read from; use - for stdin') # fmt: on def add_model_args(parser): group = parser.add_argument_group("Model configuration") # fmt: off # Model definitions can be found under fairseq/models/ # # The model architecture can be specified in several ways. # In increasing order of priority: # 1) model defaults (lowest priority) # 2) --arch argument # 3) --encoder/decoder-* arguments (highest priority) from fairseq.models import ARCH_MODEL_REGISTRY group.add_argument('--arch', '-a', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='model architecture') # fmt: on return group
36,241
52.375552
120
py
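The parser helpers above are normally used in two stages, mirroring fairseq's CLI entry points: build a parser for the job type, then call parse_args_and_arch so that architecture-, task- and registry-specific arguments are registered before the final parse. A small sketch follows; the example argument values (data directory, architecture, hyper-parameters) are made up for illustration.

from fairseq import options

# assumes the default translation task, whose add_args supplies the positional data dir
parser = options.get_training_parser()
args = options.parse_args_and_arch(
    parser,
    input_args=[
        "data-bin/example",      # hypothetical preprocessed data directory
        "--arch", "transformer",
        "--optimizer", "adam",
        "--lr", "0.0005",
        "--max-tokens", "4096",
    ],
)
print(args.arch, args.lr, args.max_tokens)  # transformer [0.0005] 4096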
RegularizedBN
RegularizedBN-main/fairseq/checkpoint_utils_bn.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #**************** #for testing bn #**************** import collections import logging import os import re import traceback from collections import OrderedDict from typing import Union import torch from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from torch.serialization import default_restore_location logger = logging.getLogger(__name__) def save_checkpoint(args, trainer, epoch_itr, val_loss,dummy): from fairseq import distributed_utils, meters # only one worker should attempt to create the required dir if args.distributed_rank == 0: os.makedirs(args.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) #prev_best = 0 if val_loss is not None: best_function = max if args.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if args.no_save or not trainer.is_data_parallel_master: return def is_better(a, b): return a >= b if args.maximize_best_checkpoint_metric else a <= b write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() #print("get here!");exit() suffix = getattr(args, "checkpoint_suffix", "") checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(dummy, suffix)] = ( end_of_epoch and not args.no_epoch_checkpoints and epoch % args.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and args.save_interval_updates > 0 and updates % args.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and args.keep_best_checkpoints > 0: checkpoint_conds["checkpoint.best_{}_{:.2f}.pt".format( args.best_checkpoint_metric, val_loss)] = ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) checkpoint_conds["checkpoint_last{}.pt".format(suffix)] = not args.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) #print(checkpoint_conds.items()) checkpoints = [ os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if 1#cond ] print(checkpoints) if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) #for cp in checkpoints[1:]: # PathManager.copy(checkpoints[0], cp, overwrite=True) write_timer.stop() logger.info( "saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if not end_of_epoch and args.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt" ) for old_chk in checkpoints[args.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) if args.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt") for old_chk in checkpoints[args.keep_last_epochs :]: if os.path.lexists(old_chk): os.remove(old_chk) if 
args.keep_best_checkpoints > 0: # only keep the best N checkpoints according to validation metric checkpoints = checkpoint_paths( args.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(args.best_checkpoint_metric)) if not args.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[args.keep_best_checkpoints:]: if os.path.lexists(old_chk): os.remove(old_chk) def load_checkpoint(args, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. """ reset_optimizer = args.reset_optimizer reset_lr_scheduler = args.reset_lr_scheduler optimizer_overrides = eval(args.optimizer_overrides) reset_meters = args.reset_meters reset_dataloader = args.reset_dataloader if getattr(args, 'finetune_from_model', None) is not None \ and (reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader): raise ValueError("--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader") suffix = getattr(args, "checkpoint_suffix", "") if args.restore_file == "checkpoint_last.pt": # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join(args.save_dir, "checkpoint_last{}.pt".format(suffix)) first_launch = not PathManager.exists(checkpoint_path) if getattr(args, 'finetune_from_model', None) is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. if PathManager.exists(args.finetune_from_model): checkpoint_path = args.finetune_from_model reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info(f'loading pretrained model from {checkpoint_path}: ' 'optimizer, lr scheduler, meters, dataloader will be reset') else: raise ValueError(f'--funetune-from-model {args.finetune_from_model} does not exist') elif getattr(args, "model_parallel_size", 1) > 1: checkpoint_path = args.restore_file.replace(".pt", suffix + ".pt") else: checkpoint_path = args.restore_file if args.restore_file != "checkpoint_last.pt" and getattr(args, 'finetune_from_model', None): raise ValueError( '--finetune-from-model and --restore-file (non-default value) ' 'can not be specified together: ' + str(args)) extra_state = trainer.load_checkpoint( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters, ) if ( extra_state is not None and "best" in extra_state and not reset_optimizer and not reset_meters ): save_checkpoint.best = extra_state["best"] if extra_state is not None and not reset_dataloader: # restore iterator from checkpoint itr_state = extra_state["train_iterator"] epoch_itr = trainer.get_train_iterator( epoch=itr_state["epoch"], load_dataset=True, **passthrough_args ) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator( epoch=1, load_dataset=True, **passthrough_args ) trainer.lr_step(epoch_itr.epoch) return extra_state, epoch_itr def load_checkpoint_to_cpu(path, arg_overrides=None): """Loads a checkpoint to CPU (with upgrading for backward compatibility).""" with PathManager.open(path, "rb") as f: state = torch.load( f, map_location=lambda s, l: default_restore_location(s, "cpu") ) args = state["args"] if arg_overrides is not None: for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) state 
= _upgrade_state_dict(state) return state def load_model_ensemble(filenames, arg_overrides=None, task=None, strict=True, suffix=''): """Loads an ensemble of models. Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading """ ensemble, args, _task = load_model_ensemble_and_task( filenames, arg_overrides, task, strict, suffix, ) return ensemble, args def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None, strict=True, suffix=''): from fairseq import tasks ensemble = [] for filename in filenames: filename = filename.replace(".pt", suffix + ".pt") if not PathManager.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = load_checkpoint_to_cpu(filename, arg_overrides) args = state["args"] if task is None: task = tasks.setup_task(args) # build model for ensemble model = task.build_model(args) model.load_state_dict(state["model"], strict=strict, args=args) ensemble.append(model) return ensemble, args, task def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"): """Retrieves all checkpoints found in `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order. """ pt_regexp = re.compile(pattern) files = os.listdir(path) entries = [] for i, f in enumerate(files): m = pt_regexp.fullmatch(f) if m is not None: idx = float(m.group(1)) if len(m.groups()) > 0 else i entries.append((idx, m.group(0))) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] def torch_persistent_save(obj, f): if isinstance(f, str): with PathManager.open(f, "wb") as h: torch_persistent_save(obj, h) return for i in range(3): try: return torch.save(obj, f) except Exception: if i == 2: logger.error(traceback.format_exc()) def save_state( filename, args, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None, ): from fairseq import utils if optim_history is None: optim_history = [] if extra_state is None: extra_state = {} state_dict = { "args": args, "model": model_state_dict or {}, "optimizer_history": optim_history + [ { "criterion_name": criterion.__class__.__name__, "optimizer_name": optimizer.__class__.__name__, "lr_scheduler_state": lr_scheduler.state_dict(), "num_updates": num_updates, } ], "extra_state": extra_state, } if utils.has_parameters(criterion): state_dict["criterion"] = criterion.state_dict() if not args.no_save_optimizer_state: state_dict["last_optimizer_state"] = optimizer.state_dict() # convert all state to CPU state_dict = utils.move_to_cpu(state_dict) with PathManager.open(filename, "wb") as f: torch_persistent_save(state_dict, f) def _upgrade_state_dict(state): """Helper for upgrading old model checkpoints.""" from fairseq import models, registry, tasks # add optimizer_history if "optimizer_history" not in state: state["optimizer_history"] = [ {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} ] state["last_optimizer_state"] = state["optimizer"] del state["optimizer"] del state["best_loss"] # move extra_state into sub-dictionary if "epoch" in state and "extra_state" not in state: state["extra_state"] = { "epoch": state["epoch"], "batch_offset": state["batch_offset"], "val_loss": state["val_loss"], } del state["epoch"] del state["batch_offset"] del state["val_loss"] # reduce 
optimizer history's memory usage (only keep the last state) if "optimizer" in state["optimizer_history"][-1]: state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] for optim_hist in state["optimizer_history"]: del optim_hist["optimizer"] # record the optimizer class name if "optimizer_name" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" # move best_loss into lr_scheduler_state if "lr_scheduler_state" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["lr_scheduler_state"] = { "best": state["optimizer_history"][-1]["best_loss"] } del state["optimizer_history"][-1]["best_loss"] # keep track of number of updates if "num_updates" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["num_updates"] = 0 # old model checkpoints may not have separate source/target positions if hasattr(state["args"], "max_positions") and not hasattr( state["args"], "max_source_positions" ): state["args"].max_source_positions = state["args"].max_positions state["args"].max_target_positions = state["args"].max_positions # use stateful training data iterator if "train_iterator" not in state["extra_state"]: state["extra_state"]["train_iterator"] = { "epoch": state["extra_state"]["epoch"], "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), } # default to translation task if not hasattr(state["args"], "task"): state["args"].task = "translation" # --raw-text and --lazy-load are deprecated if getattr(state["args"], "raw_text", False): state["args"].dataset_impl = "raw" elif getattr(state["args"], "lazy_load", False): state["args"].dataset_impl = "lazy" # epochs start at 1 if state["extra_state"]["train_iterator"] is not None: state["extra_state"]["train_iterator"]["epoch"] = max( state["extra_state"]["train_iterator"].get("epoch", 1), 1, ) # set any missing default values in the task, model or other registries registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task]) registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch]) for registry_name, REGISTRY in registry.REGISTRIES.items(): choice = getattr(state["args"], registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] registry.set_defaults(state["args"], cls) return state def prune_state_dict(state_dict, args): """Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. """ if not args or args.arch == "ptt_transformer": # args should not be none, but don't crash if it is. 
return state_dict encoder_layers_to_keep = ( args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None ) decoder_layers_to_keep = ( args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None ) if not encoder_layers_to_keep and not decoder_layers_to_keep: return state_dict # apply pruning logger.info( "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" ) def create_pruning_pass(layers_to_keep, layer_name): keep_layers = sorted( [int(layer_string) for layer_string in layers_to_keep.split(",")] ) mapping_dict = {} for i in range(len(keep_layers)): mapping_dict[str(keep_layers[i])] = str(i) regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) return {"substitution_regex": regex, "mapping_dict": mapping_dict} pruning_passes = [] if encoder_layers_to_keep: pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) if decoder_layers_to_keep: pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) new_state_dict = {} for layer_name in state_dict.keys(): match = re.search(r"\.layers\.(\d+)\.", layer_name) # if layer has no number in it, it is a supporting layer, such as an # embedding if not match: new_state_dict[layer_name] = state_dict[layer_name] continue # otherwise, layer should be pruned. original_layer_number = match.group(1) # figure out which mapping dict to replace from for pruning_pass in pruning_passes: if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ "substitution_regex" ].search(layer_name): new_layer_number = pruning_pass["mapping_dict"][original_layer_number] substitution_match = pruning_pass["substitution_regex"].search( layer_name ) new_state_key = ( layer_name[: substitution_match.start(1)] + new_layer_number + layer_name[substitution_match.end(1) :] ) new_state_dict[new_state_key] = state_dict[layer_name] # Since layers are now pruned, *_layers_to_keep are no longer needed. # This is more of "It would make it work fix" rather than a proper fix. if "encoder_layers_to_keep" in vars(args): args.encoder_layers_to_keep = None if "decoder_layers_to_keep" in vars(args): args.decoder_layers_to_keep = None return new_state_dict def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str ): """ Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. """ if not PathManager.exists(checkpoint): raise IOError("Model file not found: {}".format(checkpoint)) state = load_checkpoint_to_cpu(checkpoint) if isinstance(component, FairseqEncoder): component_type = "encoder" elif isinstance(component, FairseqDecoder): component_type = "decoder" else: raise ValueError( "component to load must be either a FairseqEncoder or " "FairseqDecoder. Loading other component types are not supported." 
) component_state_dict = OrderedDict() for key in state["model"].keys(): if key.startswith(component_type): # encoder.input_layers.0.0.weight --> input_layers.0.0.weight component_subkey = key[len(component_type) + 1 :] component_state_dict[component_subkey] = state["model"][key] component.load_state_dict(component_state_dict, strict=True) return component def verify_checkpoint_directory(save_dir: str) -> None: if not os.path.exists(save_dir): os.makedirs(save_dir, exist_ok=True) temp_file_path = os.path.join(save_dir, "dummy") try: with open(temp_file_path, "w"): pass except OSError as e: logger.warning("Unable to access checkpoint save directory: {}".format(save_dir)) raise e else: os.remove(temp_file_path)
20,533
37.597744
114
py
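A minimal sketch of how the pruning logic in save_checkpoint above leans on checkpoint_paths: the helper lists a directory, keeps only filenames matching the given pattern, and returns them sorted by the captured group in descending order. The temporary directory and the dummy filenames below are illustrative, and the sketch assumes this repository's fairseq package is importable.

import os
import tempfile

from fairseq.checkpoint_utils_bn import checkpoint_paths

# Throwaway directory holding empty files that look like epoch checkpoints.
tmpdir = tempfile.mkdtemp()
for name in ["checkpoint1.pt", "checkpoint3.pt", "checkpoint2.pt", "checkpoint_best.pt"]:
    open(os.path.join(tmpdir, name), "w").close()

# The default pattern r"checkpoint(\d+)\.pt" matches only the numbered epoch
# checkpoints and returns them newest-first, which is the order the
# keep_last_epochs pruning loop relies on when deleting old files.
paths = checkpoint_paths(tmpdir, pattern=r"checkpoint(\d+)\.pt")
print([os.path.basename(p) for p in paths])
# ['checkpoint3.pt', 'checkpoint2.pt', 'checkpoint1.pt']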
RegularizedBN
RegularizedBN-main/fairseq/token_generation_constraints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Implements tracking of constraints for a beam item. A list of constraints is given as a list of one or more token sequences, each of length at least one token. For example, for an input sentence > Die maschinelle Übersetzung ist schwer zu kontrollieren. We could have the constraints: * to influence * hard There are two implementations: * OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints. * UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints. The difference is that in the first, the constraints are assumed to be in order; the algorithm will permit zero or more tokens between them. In the second, the constraints are not ordered, so many orderings will be explored. The same sequence can be present any number of times, and will appear that many times in the output. """ import torch from collections import Counter from typing import Tuple, List, Optional, Set class ConstraintState: def __init__(self): pass def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1. across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constrain lens, plus a zero after each constraints_len = 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset:offset+this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]: """ Transforms *one row* of a packed constraint tensor (e.g., for one sentence in the batch) into a list of constraint tensors. """ constraint_list = [] num_constraints = constraint_tensor[0] constraints = constraint_tensor.tolist() offset = 1 for i in range(num_constraints): where = constraints.index(0, offset) constraint_list.append(constraint_tensor[offset:where]) offset = where + 1 return constraint_list class ConstraintNode: """ Represents a node in a trie managing unordered constraints. 
""" def __init__(self, token: int = None, parent=None): # The token associate with this node (None for the root) self.token = int(token) if token is not None else None # The parent (None at the root) self.parent = parent # Whether this node is a completed constraint self.terminal = 0 # List of child nodes self.children = {} # The cumulative number of constraints from this point in the # trie forward self.num_constraints = 0 @property def id(self): return self.token def __str__(self): term = self.terminal != 0 return f"[{self.token}].{term}#{self.num_constraints}" def __getitem__(self, key: int): return self.children.get(key, None) def next_tokens(self) -> Set[int]: """The set of child labels.""" return set(self.children.keys()) @staticmethod def create(constraints: List[List[int]]): root = ConstraintNode() for sequence in constraints: root.add_sequence(sequence) return root @staticmethod def print_graph(node: "ConstraintNode"): if len(node.children) == 0: return str(node) else: s = f"({node}" for child in node.children.values(): s += " " + ConstraintNode.print_graph(child) s += ")" return s def token_counts(self) -> Counter: """Returns a counter of the number of times each token is used in a constraint. """ token_counts = Counter() kids = list(self.children.values()) while len(kids) > 0: kid = kids.pop() token_counts[kid.id] += kid.num_constraints kids += list(kid.children.values()) return token_counts def tokens(self) -> Set[int]: """Returns the set of tokens in constraints.""" return set(self.token_counts().keys()) def add_sequence(self, sequence: List[int]): """Adds a constraint, represented as a list of integers, to the trie.""" assert len(sequence) > 0 token = int(sequence[0]) if token not in self.children: self.children[token] = ConstraintNode(token, parent=self) node = self.children[token] if len(sequence) == 1: node.terminal += 1 node.num_constraints += 1 parent = node.parent while parent is not None: parent.num_constraints += 1 parent = parent.parent else: node.add_sequence(sequence[1:]) class UnorderedConstraintState(ConstraintState): """ Records progress through the set of constraints for each item in the beam using a trie. """ def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None): self.node = node if copy_from is None: # The root node self.root = node # The set of states in the graph that have been completed self.completed = Counter() # The... 
self.generated = Counter() # The list of tokens we need to generate self.needed_tokens = self.root.tokens() else: self.completed = Counter(copy_from.completed) self.generated = Counter(copy_from.generated) self.root = copy_from.root # Mark the node as generated if self.node != self.root: self.generated[node] += 1 @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) constraint_trie_root = ConstraintNode.create(constraint_list) return UnorderedConstraintState(constraint_trie_root) def __str__(self): gen_str = ",".join([str(node) for node in self.generated]) return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}" def __copy__(self): copied_state = UnorderedConstraintState(self.node, copy_from=self) return copied_state def copy(self): return self.__copy__() @property def name(self): if self.node.id is None: return "ROOT" else: return str(self.node.id) @property def is_root(self): return self.node == self.root @property def bank(self): return sum(self.generated.values()) @property def num_completed(self): """The number of constraints (not constraint tokens) that are completed. In addition to the already-completed states, we need to account for the current state, which might get marked as completed when another token is generated. """ in_final = self.node.terminal and self.completed[self.node] < self.node.terminal return sum(self.completed.values()) + in_final @property def finished(self): return self.root.num_constraints - self.num_completed == 0 @property def token_counts(self): return self.root.token_counts() @property def tokens(self): return self.root.tokens() @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" if self.node != self.root: return self.root.next_tokens().union(self.node.next_tokens()) else: return self.root.next_tokens() def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) next_state = None child = self.node[token] if child is not None and self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) def rewind(): """If we're mid-trie and an "illegal" token is chosen next, we need to reset our state to the root state. However, along the way, we need to check whether a prefix of the current trie state represents a state we could mark as completed. 
""" node = self.node while node != self.root: if node.terminal and self.completed[node] < node.terminal: next_state.completed[node] += 1 return next_state.generated[node] -= 1 node = node.parent # Fall off the graph, check the root if next_state is None and token in self.root.next_tokens(): child = self.root[token] # We can only traverse this edge if it's not saturated if self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) else: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() elif next_state is None: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() return next_state class ConstraintSequence: def __init__(self, sequences: List[List[int]]): """Represents a set of possibly multitoken constraints by concatenating them and internally recording the end points. """ self.sequences = [] self.endpoints = [] self.num_tokens = 0 self.tokens = set() for sequence in sequences: for token in sequence: self.tokens.add(token) self.num_tokens += len(sequence) self.endpoints += [False for x in range(len(sequence) - 1)] + [True] self.sequences += sequence def __getitem__(self, key: int): return self.sequences[key] def __len__(self): return len(self.sequences) def __str__(self): return str(self.sequences) class OrderedConstraintState(ConstraintState): """ Records progress through the set of linear nonbranching constraints with gaps. """ def __init__(self, sequence: ConstraintSequence, state: int = -1): self.sequence = sequence self.state = state @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) return OrderedConstraintState(ConstraintSequence(constraint_list), -1) def __str__(self): return f"{self.state}/{self.bank}x{self.num_completed}" def __copy__(self): return OrderedConstraintState(self.sequence, self.state) def copy(self): return self.__copy__() @property def num_completed(self): if self.state == -1: return 0 count = len(list(filter(lambda x: x, self.sequence.endpoints[0:self.state+1]))) return count @property def is_root(self): return self.state == -1 @property def name(self): if self.state == -1: return "ROOT" else: return str(self.sequence[self.state]) @property def bank(self) -> int: return self.state + 1 @property def finished(self): return self.state + 1 == len(self.sequence) @property def token_counts(self): return self.sequence.token_counts() @property def tokens(self): return self.sequence.tokens @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" tokens = set() if self.state > 0: tokens.add(self.sequence[0]) if not self.finished: tokens.add(self.sequence[self.state + 1]) return tokens def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. 
We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) # print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="") if self.finished: # Accept anything next_state = self.copy() elif self.sequence[self.state + 1] == token: # Advance to the next token next_state = OrderedConstraintState(self.sequence, self.state + 1) elif self.sequence.endpoints[self.state]: # Accept anything between constraints (*) next_state = self.copy() elif token == self.sequence[0]: # Start over having generated the first token next_state = OrderedConstraintState(self.sequence, 0) else: # Start over from the root next_state = OrderedConstraintState(self.sequence, -1) return next_state
16,525
31.986028
108
py
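A small, self-contained sketch of the packing round trip described in the pack_constraints docstring above, reusing its example batch of 3, 0, and 1 constraints. The token ids are the ones from the docstring; the sketch assumes this repository's fairseq package is importable.

import torch

from fairseq.token_generation_constraints import pack_constraints, unpack_constraints

# Batch of three sentences with 3, 0, and 1 constraints respectively.
batch_constraints = [
    [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
    [],
    [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
]

packed = pack_constraints(batch_constraints)
print(packed.shape)        # torch.Size([3, 12])
print(packed[0].tolist())  # [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0]

# Unpacking one row recovers that sentence's constraint tensors.
recovered = unpack_constraints(packed[0])
print([t.tolist() for t in recovered])  # [[3, 1, 2], [3], [4, 5, 6, 7]]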
RegularizedBN
RegularizedBN-main/fairseq/file_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Utilities for working with the local dataset cache. This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_. and `huggingface <https://github.com/huggingface>`_. """ import fnmatch from functools import wraps, partial from hashlib import sha256 from io import open import json import logging import os import shutil import tarfile import tempfile try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join( os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))) default_cache_path = os.path.join(torch_cache_home, 'pytorch_fairseq') try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_FAIRSEQ_CACHE = Path( os.getenv('PYTORCH_FAIRSEQ_CACHE', default_cache_path)) except (AttributeError, ImportError): PYTORCH_FAIRSEQ_CACHE = os.getenv( 'PYTORCH_FAIRSEQ_CACHE', default_cache_path) CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" logger = logging.getLogger(__name__) # pylint: disable=invalid-name def load_archive_file(archive_file): # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=None) except EnvironmentError: logger.info( "Archive name '{}' was not found in archive name list. " "We assumed '{}' was a path or URL but couldn't find any file " "associated to this path or URL.".format( archive_file, archive_file, ) ) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) # Extract archive to temp dir and replace .tar.bz2 if necessary tempdir = None if not os.path.isdir(resolved_archive_file): tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) ext = os.path.splitext(archive_file)[1][1:] with tarfile.open(resolved_archive_file, 'r:' + ext) as archive: top_dir = os.path.commonprefix(archive.getnames()) archive.extractall(tempdir) os.remove(resolved_archive_file) shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file) shutil.rmtree(tempdir) return resolved_archive_file def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the URL's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): from botocore.exceptions import ClientError try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def request_wrap_timeout(func, url): import requests for attempt, timeout in enumerate([10, 20, 40, 60, 60]): try: return func(timeout=timeout) except requests.exceptions.Timeout as e: logger.warning("Request for %s timed-out (attempt %d). 
Retrying with a timeout of %d secs", url, attempt, timeout, exc_info=e) continue raise RuntimeError(f"Unable to fetch file {url}") def http_get(url, temp_file): import requests from tqdm import tqdm req = request_wrap_timeout(partial(requests.get, url, stream=True), url) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: try: import requests response = request_wrap_timeout(partial(requests.head, url, allow_redirects=True), url) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except EnvironmentError: etag = None filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*') matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: output_string = json.dumps(meta) meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
11,036
32.243976
103
py
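A short sketch of two cache behaviours above that are easy to check locally: url_to_filename derives the cache key from the sha256 of the URL (plus the sha256 of the ETag when one is available), and cached_path is a pass-through for paths that already exist on disk. The URL and ETag values are made up, and the sketch assumes this repository's fairseq package is importable.

from fairseq.file_utils import cached_path, url_to_filename

# Cache key: sha256(url) plus, when an ETag is present, '.' + sha256(etag),
# so a changed ETag yields a fresh cache entry for the same URL.
name = url_to_filename("https://example.com/model.tar.gz", etag='"abc123"')
print(name)  # '<64 hex chars>.<64 hex chars>'

# For something that is already a local file, cached_path returns it unchanged.
print(cached_path(__file__))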
RegularizedBN
RegularizedBN-main/fairseq/incremental_decoding_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional import uuid from torch import Tensor class FairseqIncrementalState(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_incremental_state() def init_incremental_state(self): self._incremental_state_id = str(uuid.uuid4()) def _get_full_incremental_state_key(self, key: str) -> str: return "{}.{}".format(self._incremental_state_id, key) def get_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" full_key = self._get_full_incremental_state_key(key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = self._get_full_incremental_state_key(key) incremental_state[full_key] = value return incremental_state def with_incremental_state(cls): cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.__bases__ if b != FairseqIncrementalState) return cls
1,760
33.529412
112
py
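A toy sketch of the with_incremental_state decorator above: it splices FairseqIncrementalState into the class's bases, so each instance gets a uuid-scoped namespace inside the shared incremental_state dict and two instances never clobber each other's entries. The ToyDecoderLayer module and the "prev_tokens" key are invented for illustration and are not part of fairseq.

import torch
from torch import nn

from fairseq.incremental_decoding_utils import with_incremental_state

@with_incremental_state
class ToyDecoderLayer(nn.Module):
    def forward(self, x, incremental_state=None):
        if incremental_state is not None:
            # Keys are automatically prefixed with this instance's uuid.
            saved = self.get_incremental_state(incremental_state, "prev_tokens")
            prev = saved["prev"] if saved is not None else x.new_zeros(0, x.size(1))
            x = torch.cat([prev, x], dim=0)
            self.set_incremental_state(incremental_state, "prev_tokens", {"prev": x})
        return x

layer = ToyDecoderLayer()
state = {}  # shared dict passed through every decoding step
step1 = layer(torch.ones(1, 4), incremental_state=state)
step2 = layer(torch.ones(1, 4), incremental_state=state)
print(step1.size(0), step2.size(0))  # 1 2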
RegularizedBN
RegularizedBN-main/fairseq/search.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Optional, List import torch import torch.nn as nn from torch import Tensor from fairseq.token_generation_constraints import ConstraintState, UnorderedConstraintState, OrderedConstraintState class Search(nn.Module): def __init__(self, tgt_dict): super().__init__() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.src_lengths = torch.tensor(-1) self.supports_constraints = False def step(self, step, lprobs, scores): """Take a single search step. Args: step: the current search step, starting at 0 lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step scores: (bsz x input_beam_size x step) the historical model scores of each hypothesis up to this point Return: A tuple of (scores, indices, beams) where: scores: (bsz x output_beam_size) the scores of the chosen elements; output_beam_size can be larger than input_beam_size, e.g., we may return 2*input_beam_size to account for EOS indices: (bsz x output_beam_size) the indices of the chosen elements beams: (bsz x output_beam_size) the hypothesis ids of the chosen elements, in the range [0, input_beam_size) """ raise NotImplementedError @torch.jit.export def set_src_lengths(self, src_lengths): self.src_lengths = src_lengths @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): """Initialize constraint states for constrained decoding (if supported). Args: batch_constraints: (torch.Tensor, optional) the list of constraints, in packed form beam_size: (int) the beam size Returns: *encoder_out* rearranged according to *new_order* """ pass def prune_sentences(self, batch_idxs: Tensor): """ Removes constraint states for completed sentences (if supported). This is called from sequence_generator._generate() when sentences are deleted from the batch. Args: batch_idxs: Indices of *sentences* whose constraint state should be *kept*. """ pass def update_constraints(self, active_hypos: Tensor): """ Updates the constraint states by selecting the beam items that are retained. This is called at each time step of sequence_generator._generate() when the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size. Args: active_hypos: (batch size, beam size) list of integers denoting, for each sentence, which beam candidate items should be kept. """ pass class BeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) self.constraint_states = None @torch.jit.export def step(self, step: int, lprobs, scores: Optional[Tensor]): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. 
beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # At this point, beams_buf and indices_buf are single-dim and contain relative indices return scores_buf, indices_buf, beams_buf class LexicallyConstrainedBeamSearch(Search): """Implements lexically constrained beam search as described in Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation. Post & Vilar, NAACL 2018. https://www.aclweb.org/anthology/N18-1119/ and Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting. Hu et al, NAACL 2019. https://www.aclweb.org/anthology/N19-1090/ This is accomplished by maintaining, for each beam hypothesis, a ConstraintState object (see constraints.py) that tracks which constraints have been generated and using this information to shape the beam for each input sentence. """ def __init__(self, tgt_dict, representation): super().__init__(tgt_dict) self.representation = representation self.vocab_size = len(tgt_dict) self.num_cands = 0 self.supports_constraints = True @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): self.constraint_states = [] for constraint_tensor in batch_constraints: if self.representation == "ordered": constraint_state = OrderedConstraintState.create(constraint_tensor) elif self.representation == "unordered": constraint_state = UnorderedConstraintState.create(constraint_tensor) self.constraint_states.append([constraint_state for i in range(beam_size)]) @torch.jit.export def prune_sentences(self, batch_idxs: Tensor): self.constraint_states = [self.constraint_states[i] for i in batch_idxs.tolist()] @torch.jit.export def update_constraints(self, active_hypos: Tensor): if self.constraint_states: batch_size = active_hypos.size(0) for sentid in range(batch_size): self.constraint_states[sentid] = [self.constraint_states[sentid][i] for i in active_hypos[sentid]] @torch.jit.export def step(self, step: int, lprobs: Tensor, scores: Optional[Tensor]): """ A constrained step builds a large candidates list from the following: - the top 2 * {beam_size} items over the whole beam - for each item in the beam - the top {each_k} (default 1) - all next constraints We then compute the constrained state of each beam item, and assign stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so on. We then sort by (stripe, score), and truncate the list at 2 * beam size. Args: step: the decoder step lprobs: (batch size, beam size, target vocab) the target-vocab distributions for each item in the beam. Retrun: A tuple of (scores, indices, beams, constraints) where: scores: (batch, output beam size) the scores of the chosen elements indices: (batch, output beam size) the target vocab indices of the chosen elements beams: (batch, output beam size) the 0-indexed hypothesis ids of the chosen elements constraints: (batch, output beam size) the new constraint states """ each_k = 1 device = lprobs.device batch_size, beam_size, vocab_size = lprobs.size() self.num_cands = min( # Just take the k-best. We'll get another k from the 1-best from each # row, plus more from the constraints beam_size * 2, lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad ) # STEP 0: Preliminary. 
Prevent EOS for unfinished hyps across all batch items constraint_states = self.constraint_states if constraint_states and step > 0: not_finished_indices = [] for sentno, sent_constraints in enumerate(constraint_states): for beamno, state in enumerate(sent_constraints): index = sentno * beam_size + beamno if not state.finished: not_finished_indices.append(index) not_finished_indices = torch.tensor(not_finished_indices) if not_finished_indices.numel() > 0: lprobs.view(batch_size * beam_size, -1)[not_finished_indices, self.eos] = -math.inf if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam entry for each batch item lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(batch_size, -1), self.num_cands, ) scores_buf, indices_buf = top_prediction # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # Short circuit if there are no constraints in this batch if not constraint_states: return scores_buf, indices_buf, beams_buf # STEP 1: get top-1 from each hypothesis across all sentences in the batch if step > 0: top_scores, top_indices = torch.topk( lprobs.view(batch_size * beam_size, -1), k=each_k, dim=1, ) top_scores = top_scores.view(batch_size, -1) top_indices = top_indices.view(batch_size, -1) scores_buf = torch.cat((scores_buf, top_scores), dim=1) indices_buf = torch.cat((indices_buf, top_indices), dim=1) new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1) beams_buf = torch.cat((beams_buf, new_beams), dim=1) # Now, process sentences in the batch one by one. new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device) new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() for sentno, states in enumerate(constraint_states): scores, indices, beams, new_states = self.step_sentence(step, sentno, lprobs[sentno], constraint_states[sentno], beams_buf[sentno].clone(), indices_buf[sentno].clone(), scores_buf[sentno].clone()) new_scores_buf[sentno] = scores new_indices_buf[sentno] = indices new_beams_buf[sentno] = beams self.constraint_states[sentno] = new_states return new_scores_buf, new_indices_buf, new_beams_buf @torch.jit.export def step_sentence(self, step: int, sentno: int, lprobs: Tensor, constraint_states: List[List[ConstraintState]], beams_buf: Tensor, indices_buf: Tensor, scores_buf: Tensor): """Does per-sentence processing. Adds all constraints for each hypothesis to the list of candidates; then removes duplicates, sorts, and dynamically stripes across the banks. All tensor inputs are collapsed to those pertaining to a single input sentence. 
""" device = lprobs.device # STEP 2: Add all constraints for each beam item for beamno, state in enumerate(constraint_states): next_tokens = torch.tensor(list(state.next_tokens()), device=device).long() if next_tokens.numel() != 0: indices_buf = torch.cat((indices_buf, next_tokens)) next_beams = torch.tensor(beamno, device=device).repeat(next_tokens.size(0)).long() beams_buf = torch.cat((beams_buf, next_beams)) next_values = lprobs[beamno].take(next_tokens.view(-1)) scores_buf = torch.cat((scores_buf, next_values)) # At the 0th time step, there is just one beam item if step == 0: break # STEP 3: Compute the "bank" for each candidate. This is the # number of constraints it's generated. We need this so that # we can do round-robin allocation of the beam across these # banks. If C is the number of constraints, we select the best # item in bank C, then the best in bank C-1, etc, followed by # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so # on, until the maximum beam size. We accomplish this by # creating a sort key and striping across the banks. # Compute the new states for all candidates cands_size = indices_buf.size(0) constraint_states = [constraint_states[beams_buf[i]].advance(indices_buf[i]) for i in range(cands_size)] banks = torch.tensor([state.bank for state in constraint_states], device=device) # STEP 4: Sort num_constraint_tokens = len(state.tokens) # Sort by keys (bank, score) (i.e., sort banks together, and scores # within banks). AFAIK pytorch doesn't support either stable sort or # multi-key sorting, so we have to hack this. MAX_SCORE = -100 sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf sort_values, sort_indices = sort_key.sort(dim=0, descending=True) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] banks = banks[sort_indices] # Sort the constraints to follow suit constraint_states = [constraint_states[i] for i in sort_indices] # STEP 5: Remove duplicates. The topk calls (overall and # per-row) plus the per-row generation of constraints will # produce duplicates. Here we remove them. def roll(t): """Rolls a 1d tensor left by 1. [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3] """ return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0) # We map candidates (beam, token_id) to a single dimension. # This is then shifted by 1. We can then easily identify # duplicates and create a mask that identifies unique # extensions. uniques_mask = (beams_buf * (self.vocab_size + 1) + indices_buf) uniques_mask = roll(uniques_mask) != uniques_mask # Use the mask to pare down the data structures scores_buf = torch.masked_select(scores_buf, uniques_mask) indices_buf = torch.masked_select(indices_buf, uniques_mask) beams_buf = torch.masked_select(beams_buf, uniques_mask) banks = torch.masked_select(banks, uniques_mask) i = 1 for mask in uniques_mask[1:]: if not mask: constraint_states.pop(i) i += mask # STEP 6: Assign IDs round-robin across banks, sort, and # truncate. Now that the candidates are sorted by (bank, # score) and uniqed, we dynamically allocate the {beam_size} # beam by striping across the candidates. These stripes will # be used as sort keys to do round-robin selection. This is # accomplished in a single pass with offsets. Sorting by # highest-banks (furthest-along hypotheses) first ensures # progress through the constraints. 
# # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0 # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1 # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7 # = 0 5 10 1 6 11 13 2 7 12 3 8 # # Sorting by this then gives the following banks: # # 3 2 1 0 3 2 1 0 3 2 1 2 # # We'll take the top {beam_size} of these. stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)] stripes = torch.zeros_like(banks) cur_bank_count = -1 cur_bank = banks[0] for i, bank in enumerate(banks): if bank != cur_bank: cur_bank_count = 0 cur_bank = bank else: cur_bank_count += 1 stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count] # STEP 7: Sort by the stripes values sort_values, sort_indices = stripes.sort(dim=0) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] constraint_states = [constraint_states[i] for i in sort_indices] # STEP 8: Truncate to the candidates size! scores_buf = scores_buf[:self.num_cands] indices_buf = indices_buf[:self.num_cands] beams_buf = beams_buf[:self.num_cands] return scores_buf, indices_buf, beams_buf, constraint_states class LengthConstrainedBeamSearch(Search): def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b): super().__init__(tgt_dict) self.min_len_a = min_len_a self.min_len_b = min_len_b self.max_len_a = max_len_a self.max_len_b = max_len_b self.beam = BeamSearch(tgt_dict) self.needs_src_lengths = True def step(self, step: int, lprobs, scores): min_lens = self.min_len_a * self.src_lengths + self.min_len_b max_lens = self.max_len_a * self.src_lengths + self.max_len_b lprobs[step < min_lens, :, self.eos] = -math.inf lprobs[step >= max_lens, :, self.eos] = 0 return self.beam.step(step, lprobs, scores) class DiverseBeamSearch(Search): """Diverse Beam Search. See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models" for details. We only implement the Hamming Diversity penalty here, which performed best in the original paper. 
""" def __init__(self, tgt_dict, num_groups, diversity_strength): super().__init__(tgt_dict) self.num_groups = num_groups self.diversity_strength = -diversity_strength self.beam = BeamSearch(tgt_dict) @torch.jit.export def step(self, step: int, lprobs, scores): bsz, beam_size, vocab_size = lprobs.size() if beam_size % self.num_groups != 0: raise ValueError( "DiverseBeamSearch requires --beam to be divisible by the number of groups" ) # initialize diversity penalty diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs) scores_G, indices_G, beams_G = [], [], [] for g in range(self.num_groups): lprobs_g = lprobs[:, g :: self.num_groups, :] scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None # apply diversity penalty if g > 0: lprobs_g = torch.add( lprobs_g, other=diversity_buf.unsqueeze(1), alpha=self.diversity_strength, ) else: lprobs_g = lprobs_g.contiguous() scores_buf, indices_buf, beams_buf = self.beam.step( step, lprobs_g, scores_g ) beams_buf.mul_(self.num_groups).add_(g) scores_G.append(scores_buf.clone()) indices_G.append(indices_buf.clone()) beams_G.append(beams_buf.clone()) # update diversity penalty diversity_buf.scatter_add_( 1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf) ) # interleave results from different groups scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1) indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1) beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1) return scores_buf, indices_buf, beams_buf class Sampling(Search): sampling_topk: int sampling_topp: float def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0): super().__init__(tgt_dict) self.sampling_topk = sampling_topk self.sampling_topp = sampling_topp def _sample_topp(self, lprobs): """Sample among the smallest set of elements whose cumulative probability mass exceeds p. See `"The Curious Case of Neural Text Degeneration" (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_. Args: lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step Return: A tuple of (trimed_probs, truncated_indices) where: trimed_probs: (bsz x input_beam_size x ?) the model's probabilities over the elements selected to sample from. The width of the third dimension is determined by top-P. truncated_indices: (bsz x input_beam_size x ?) the indices of the chosen elements. """ probs = lprobs.exp_() # sort the last dimension (vocab dimension) in descending order sorted_probs, sorted_indices = probs.sort(descending=True) # compute a mask to indicate the words to be included in the top-P set. cumsum_probs = sorted_probs.cumsum(dim=2) mask = cumsum_probs.lt(self.sampling_topp) # note that mask was computed by 'lt'. One more word needs to be included # so that the cumulative probability mass can exceed p. cumsum_mask = mask.cumsum(dim=2) last_included = cumsum_mask[:, :, -1:] last_included.clamp_(0, mask.size()[2] - 1) mask = mask.scatter_(2, last_included, 1) # truncate unnecessary dims. max_dim = last_included.max() truncated_mask = mask[:, :, : max_dim + 1] truncated_probs = sorted_probs[:, :, : max_dim + 1] truncated_indices = sorted_indices[:, :, : max_dim + 1] # trim the words that are not in top-P by setting their probabilities # to 0, so that they would not be sampled later. 
trim_mask = ~truncated_mask trimed_probs = truncated_probs.masked_fill_(trim_mask, 0) return trimed_probs, truncated_indices @torch.jit.export def step(self, step: int, lprobs, scores): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() if self.sampling_topp > 0: # only sample from the smallest set of words whose cumulative probability mass exceeds p probs, top_indices = self._sample_topp(lprobs) elif self.sampling_topk > 0: # only sample from top-k candidates lprobs, top_indices = lprobs.topk(self.sampling_topk) probs = lprobs.exp_() else: probs = lprobs.exp_() # dummy data to be consistent with true branch for type check top_indices = torch.empty(0).to(probs) # sample if step == 0: indices_buf = torch.multinomial( probs.view(bsz, -1), beam_size, replacement=True, ).view(bsz, beam_size) else: indices_buf = torch.multinomial( probs.view(bsz * beam_size, -1), 1, replacement=True, ).view(bsz, beam_size) if step == 0: # expand to beam size probs = probs.expand(bsz, beam_size, -1) # gather scores scores_buf = torch.gather( probs, dim=2, index=indices_buf.unsqueeze(-1) ) scores_buf = scores_buf.log_().view(bsz, -1) # remap indices if using top-k or top-P sampling if self.sampling_topk > 0 or self.sampling_topp > 0: indices_buf = torch.gather( top_indices.expand(bsz, beam_size, -1), dim=2, index=indices_buf.unsqueeze(-1), ).squeeze(2) if step == 0: beams_buf = indices_buf.new_zeros(bsz, beam_size) else: beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1) # make scores cumulative scores_buf.add_( torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf) ) return scores_buf, indices_buf, beams_buf class DiverseSiblingsSearch(Search): """ Beam search with diverse siblings. See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details. https://arxiv.org/abs/1611.08562 1/ Calculate hypotheses for each beam 2/ Intra-sibling ordering 3/ Rewrite scores 4/ Choose top K hypotheses if diversity_rate == 0 is equivalent to BeamSearch """ def __init__(self, tgt_dict, diversity_rate): super().__init__(tgt_dict) self.diversity_rate = diversity_rate self.beam = BeamSearch(tgt_dict) def step(self, step: int, lprobs, scores): bsz, beam_size, vocab_size = lprobs.size() k = min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. 
beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ) s_list: List[Tensor] i_list: List[Tensor] s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)] i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)] sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate if step == 0: return self.beam.step(step, lprobs, scores) lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) # 1/ Calculate hypotheses for each beam for i in range(beam_size): torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i])) i_list[i].fmod_(vocab_size) # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores s_list[i].sub_(sibling_score) # 4/ Choose top K hypotheses indices = torch.stack(i_list, dim=1).view(bsz, -1) final_scores = torch.empty(0).to(lprobs) final_indices = torch.LongTensor().to(device=lprobs.device) final_beams = torch.LongTensor().to(device=lprobs.device) (final_scores, final_indices) = torch.topk( torch.stack(s_list, dim=1).view(bsz, -1), k, ) final_beams = final_indices // k for i in range(bsz): final_indices[i] = indices[i][final_indices[i]] return final_scores, final_indices, final_beams
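# --- Illustrative sketch (not part of the original search.py) -------------------
# A minimal, hedged re-implementation of the top-p truncation idea used by
# Sampling._sample_topp above, run on a toy distribution. The function name,
# tensor shapes and the p value are assumptions for illustration only.
import torch

def toy_top_p(lprobs, p=0.9):
    probs = lprobs.exp()
    sorted_probs, sorted_indices = probs.sort(descending=True)
    cumsum = sorted_probs.cumsum(dim=-1)
    keep = cumsum.lt(p).long()
    # include one more word so the kept mass exceeds p, mirroring last_included above
    last = keep.cumsum(dim=-1)[..., -1:].clamp(0, keep.size(-1) - 1)
    keep = keep.scatter(-1, last, 1)
    return sorted_probs * keep, sorted_indices

lprobs = torch.log(torch.tensor([[[0.5, 0.3, 0.15, 0.05]]]))  # (bsz, beam, vocab)
trimmed, idx = toy_top_p(lprobs)
print(trimmed)  # probability mass outside the nucleus {0.5, 0.3, 0.15} is zeroed out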
27,939
40.0279
114
py
RegularizedBN
RegularizedBN-main/fairseq/nan_detector.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch


logger = logging.getLogger(__name__)


class NanDetector:
    """
    Detects the first NaN or Inf in the forward and/or backward pass and logs it,
    together with the module name
    """
    def __init__(self, model, forward=True, backward=True):
        self.bhooks = []
        self.fhooks = []
        self.forward = forward
        self.backward = backward
        self.reset()

        for name, mod in model.named_modules():
            mod.__module_name = name
            self.add_hooks(mod)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()

    def add_hooks(self, module):
        if self.forward:
            self.fhooks.append(module.register_forward_hook(self.fhook_fn))
        if self.backward:
            self.bhooks.append(module.register_backward_hook(self.bhook_fn))

    def reset(self):
        self.has_printed_f = False
        self.has_printed_b = False

    def _detect(self, tensor, name, backward):
        err = None
        if (
            torch.is_floating_point(tensor)
            # single value tensors (like the loss) will not provide much info
            and tensor.numel() >= 2
        ):
            with torch.no_grad():
                if torch.isnan(tensor).any():
                    err = "NaN"
                elif torch.isinf(tensor).any():
                    err = "Inf"
        if err is not None:
            err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
        return err

    def _apply(self, module, inp, x, backward):
        if torch.is_tensor(x):
            if isinstance(inp, tuple) and len(inp) > 0:
                inp = inp[0]
            err = self._detect(x, module.__module_name, backward)
            if err is not None:
                if torch.is_tensor(inp) and not backward:
                    err += (
                        f" input max: {inp.max().item()}, input min: {inp.min().item()}"
                    )
                has_printed_attr = 'has_printed_b' if backward else 'has_printed_f'
                logger.warning(err)
                setattr(self, has_printed_attr, True)
        elif isinstance(x, dict):
            for v in x.values():
                self._apply(module, inp, v, backward)
        elif isinstance(x, list) or isinstance(x, tuple):
            for v in x:
                self._apply(module, inp, v, backward)

    def fhook_fn(self, module, inp, output):
        if not self.has_printed_f:
            self._apply(module, inp, output, backward=False)

    def bhook_fn(self, module, inp, output):
        if not self.has_printed_b:
            self._apply(module, inp, output, backward=True)

    def close(self):
        for hook in self.fhooks + self.bhooks:
            hook.remove()
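# --- Illustrative sketch (not part of the original nan_detector.py) -------------
# A hedged usage example of the NanDetector context manager defined above; the
# toy model and the injected NaN are assumptions for illustration only.
import torch
import torch.nn as nn

toy_model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 1))
with NanDetector(toy_model, forward=True, backward=True):
    batch = torch.randn(8, 4)
    batch[0, 0] = float("nan")   # force a NaN so the forward hooks report a module
    loss = toy_model(batch).sum()
    loss.backward()              # backward hooks inspect gradients the same way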
3,041
32.065217
119
py
RegularizedBN
RegularizedBN-main/fairseq/iterative_refinement_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import torch import numpy as np from fairseq import utils DecoderOut = namedtuple('IterativeRefinementDecoderOut', [ 'output_tokens', 'output_scores', 'attn', 'step', 'max_step', 'history' ]) class IterativeRefinementGenerator(object): def __init__( self, tgt_dict, models=None, eos_penalty=0.0, max_iter=10, max_ratio=2, beam_size=1, decoding_format=None, retain_dropout=False, adaptive=True, retain_history=False, reranking=False, ): """ Generates translations based on iterative refinement. Args: tgt_dict: target dictionary eos_penalty: if > 0.0, it penalized early-stopping in decoding max_iter: maximum number of refinement iterations max_ratio: generate sequences of maximum length ax, where x is the source length decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} retain_dropout: retaining dropout in the inference adaptive: decoding with early stop """ self.bos = tgt_dict.bos() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.eos_penalty = eos_penalty self.max_iter = max_iter self.max_ratio = max_ratio self.beam_size = beam_size self.reranking = reranking self.decoding_format = decoding_format self.retain_dropout = retain_dropout self.retain_history = retain_history self.adaptive = adaptive self.models = models def generate_batched_itr( self, data_itr, maxlen_a=None, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. """ for sample in data_itr: if "net_input" not in sample: continue if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( self.models, sample, prefix_tokens=sample["target"][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sample["ntokens"]) for i, id in enumerate(sample["id"]): # remove padding src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) ref = utils.strip_pad(sample["target"][i, :], self.pad) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample, prefix_tokens=None, constraints=None): if constraints is not None: raise NotImplementedError("Constrained decoding with the IterativeRefinementGenerator is not supported") # TODO: iterative refinement generator does not support ensemble for now. if not self.retain_dropout: for model in models: model.eval() model, reranker = models[0], None if self.reranking: assert len(models) > 1, "Assuming the last checkpoint is the reranker" assert self.beam_size > 1, "Reranking requires multiple translation for each example" reranker = models[-1] models = models[:-1] if len(models) > 1 and hasattr(model, 'enable_ensemble'): assert model.allow_ensemble, "{} does not support ensembling".format(model.__class__.__name__) model.enable_ensemble(models) # TODO: better encoder inputs? 
src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size() # initialize encoder_out = model.forward_encoder([src_tokens, src_lengths]) prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) if self.beam_size > 1: assert model.allow_length_beam, \ "{} does not support decoding with length beam.".format(model.__class__.__name__) # regenerate data based on length-beam length_beam_order = utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) encoder_out = model.encoder.reorder_encoder_out(encoder_out, length_beam_order) prev_decoder_out = model.regenerate_length_beam(prev_decoder_out, self.beam_size) bsz = bsz * self.beam_size sent_idxs = torch.arange(bsz) prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.retain_history: prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) finalized = [[] for _ in range(bsz)] def is_a_loop(x, y, s, a): b, l_x, l_y = x.size(0), x.size(1), y.size(1) if l_x > l_y: y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) if a is not None: a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) elif l_x < l_y: x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) return (x == y).all(1), y, s, a def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): cutoff = prev_out_token.ne(self.pad) tokens = prev_out_token[cutoff] if prev_out_score is None: scores, score = None, None else: scores = prev_out_score[cutoff] score = scores.mean() if prev_out_attn is None: hypo_attn, alignment = None, None else: hypo_attn = prev_out_attn[cutoff] alignment = hypo_attn.max(dim=1)[1] return { "steps": step, "tokens": tokens, "positional_scores": scores, "score": score, "hypo_attn": hypo_attn, "alignment": alignment, } for step in range(self.max_iter + 1): decoder_options = { "eos_penalty": self.eos_penalty, "max_ratio": self.max_ratio, "decoding_format": self.decoding_format, } prev_decoder_out = prev_decoder_out._replace( step=step, max_step=self.max_iter + 1, ) decoder_out = model.forward_decoder( prev_decoder_out, encoder_out, **decoder_options ) if self.adaptive: # terminate if there is a loop terminated, out_tokens, out_scores, out_attn = is_a_loop( prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn ) decoder_out = decoder_out._replace( output_tokens=out_tokens, output_scores=out_scores, attn=out_attn, ) else: terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool() if step == self.max_iter: # reach last iteration, terminate terminated.fill_(1) # collect finalized sentences finalized_idxs = sent_idxs[terminated] finalized_tokens = decoder_out.output_tokens[terminated] finalized_scores = decoder_out.output_scores[terminated] finalized_attn = ( None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated] ) if self.retain_history: finalized_history_tokens = [h[terminated] for h in decoder_out.history] for i in range(finalized_idxs.size(0)): finalized[finalized_idxs[i]] = [ finalized_hypos( step, finalized_tokens[i], finalized_scores[i], None if finalized_attn is None else finalized_attn[i], ) ] if self.retain_history: finalized[finalized_idxs[i]][0]['history'] = [] for j in range(len(finalized_history_tokens)): finalized[finalized_idxs[i]][0]['history'].append( finalized_hypos( step, finalized_history_tokens[j][i], None, None ) ) # check if all 
terminated if terminated.sum() == terminated.size(0): break # for next step not_terminated = ~terminated prev_decoder_out = decoder_out._replace( output_tokens=decoder_out.output_tokens[not_terminated], output_scores=decoder_out.output_scores[not_terminated], attn=decoder_out.attn[not_terminated] if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) else None, history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None, ) encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()) sent_idxs = sent_idxs[not_terminated] prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.beam_size > 1: if reranker is not None: finalized = self.rerank( reranker, finalized, [src_tokens, src_lengths], self.beam_size ) # aggregate information from length beam finalized = [ finalized[np.argmax( [finalized[self.beam_size * i + j][0]['score'] for j in range(self.beam_size)] ) + self.beam_size * i] for i in range(len(finalized) // self.beam_size) ] return finalized def rerank(self, reranker, finalized, encoder_input, beam_size): def rebuild_batch(finalized): finalized_tokens = [f[0]['tokens'] for f in finalized] finalized_maxlen = max(f.size(0) for f in finalized_tokens) final_output_tokens = finalized_tokens[0].new_zeros(len(finalized_tokens), finalized_maxlen).fill_(self.pad) for i, f in enumerate(finalized_tokens): final_output_tokens[i, :f.size(0)] = f return final_output_tokens final_output_tokens = rebuild_batch(finalized) final_output_tokens[:, 0] = self.eos # autoregressive model assumes starting with EOS reranker_encoder_out = reranker.encoder(*encoder_input) length_beam_order = utils.new_arange( final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)).t().reshape(-1) reranker_encoder_out = reranker.encoder.reorder_encoder_out(reranker_encoder_out, length_beam_order) reranking_scores = reranker.get_normalized_probs( reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None) reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) reranking_masks = final_output_tokens[:, 1:].ne(self.pad) reranking_scores = reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(reranking_scores) for i in range(len(finalized)): finalized[i][0]['score'] = reranking_scores[i] return finalized
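# --- Illustrative sketch (not part of the original file) ------------------------
# A hedged toy of the length-beam aggregation at the end of generate(): for each
# sentence, the beam_size finalized hypotheses are collapsed to the single
# best-scoring one. The layout and scores below are assumptions for illustration.
import numpy as np

beam_size = 3
# hypotheses are stored consecutively per sentence: [s0b0, s0b1, s0b2, s1b0, ...]
finalized = [[{"score": s}] for s in [0.1, 0.7, 0.4, 0.9, 0.2, 0.3]]
best = [
    finalized[np.argmax(
        [finalized[beam_size * i + j][0]["score"] for j in range(beam_size)]
    ) + beam_size * i]
    for i in range(len(finalized) // beam_size)
]
print([h[0]["score"] for h in best])  # -> [0.7, 0.9]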
12,517
38.36478
122
py
RegularizedBN
RegularizedBN-main/fairseq/trainer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a network across multiple GPUs. """ import contextlib from itertools import chain import logging import sys import time from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, distributed_utils, models, optim, utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler logger = logging.getLogger(__name__) class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, args, task, model, criterion, quantizer=None): self.args = args self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = getattr(args, 'tpu', False) self.cuda = torch.cuda.is_available() and not args.cpu and not self.tpu if self.cuda: self.device = torch.device('cuda') elif self.tpu: self.device = utils.get_tpu_device(args) else: self.device = torch.device('cpu') # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if self.tpu: import torch_xla.core.xla_model as xm self._model = xm.send_cpu_data_to_device(self._model, self.device) if args.fp16: self._criterion = self._criterion.half() self._model = self._model.half() elif args.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( 'detected shared parameter: {} <- {}'.format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = "DUMMY" # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None #打印一下self.data_parallel_world_size 是不是代表GPU个数 # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" 
self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None @property def data_parallel_world_size(self): return self.args.distributed_world_size @property def data_parallel_process_group(self): if self.tpu: return ('tpu', None) else: return None @property def data_parallel_rank(self): return self.args.distributed_rank @property def is_data_parallel_master(self): return distributed_utils.is_master(self.args) @property def criterion(self): if self._wrapped_criterion is None: if ( utils.has_parameters(self._criterion) and self.data_parallel_world_size > 1 and not self.args.use_bmuf and not self.tpu ): self._wrapped_criterion = models.DistributedFairseqModel( self.args, self._criterion, process_group=self.data_parallel_process_group ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if self._wrapped_model is None: if ( self.data_parallel_world_size > 1 and not self.args.use_bmuf and not self.tpu ): self._wrapped_model = models.DistributedFairseqModel( self.args, self._model, process_group=self.data_parallel_process_group ) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): #print(type(self.model.parameters()),type(self.criterion.parameters())); #<class 'generator'> <class 'generator'> #for name, para in self.model.named_parameters(): # print(para); #与model.parameters()元素一致,这样可以通过name筛选,然后放到一个list里 if 1 or self.args.setting_encoder_lr==0: params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) else: #for name, para in self.model.named_parameters(): # print(name) #exit() #包含encoder norm的参数不decay param1 = [] param2 = [] ''' for name, para in self.model.named_parameters(): if para.requires_grad: if name.find('WNScale')<0 and name.find('weight_g')<0: param1.append(para) else: print(name) if self.args.cwn_adjust or self.args.wn_adjust: print("adjust") param2.append(para) ''' for name, para in self.model.named_parameters(): if para.requires_grad: if name.find('decoder')>=0: param1.append(para) else: print(name) param2.append(para) p1 = {'params':param1,'lr':0.0015} p2 = {'params':param2} params = [p1,p2] #print('before') #for x in self.criterion.parameters(): #空的 # print(x) #print('after') #print(params[-1]); if self.args.fp16 or self.args.bf16: #false if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16, " "please switch to FP32 which is likely to be faster" ) if self.args.memory_efficient_fp16 or self.args.memory_efficient_bf16: self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.args, params ) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: #false logger.info("NOTE: your device may support faster training with --fp16") self._optimizer = optim.build_optimizer(self.args, params) #here! if self.args.use_bmuf: #false self._optimizer = optim.FairseqBMUF(self.args, self._optimizer) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. 
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer) self._lr_scheduler.step_update(0) def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" if self.is_data_parallel_master: # only save one checkpoint extra_state["metrics"] = metrics.state_dict() extra_state["previous_training_time"] = self.cumulative_training_time() checkpoint_utils.save_state( filename, self.args, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state, ) def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """Load all training state from a checkpoint file.""" extra_state, self._optim_history, last_optim_state = None, [], None bexists = PathManager.isfile(filename) if bexists: state = checkpoint_utils.load_checkpoint_to_cpu(filename) # load model parameters try: self.get_model().load_state_dict( state["model"], strict=True, args=self.args ) if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] last_optim_state = state.get("last_optimizer_state", None) if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), "Criterion does not match; please reset the optimizer (--reset-optimizer)." assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), "Optimizer does not match; please reset the optimizer (--reset-optimizer)." if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: epoch = extra_state["train_iterator"]["epoch"] logger.info( "loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() else: logger.info("no existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( #生成了训练数据迭代器 --train_subset='train'控制 self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, ): """Return an EpochBatchIterator over the training set for a given epoch.""" #epoch=1,2,3,.... 
#combine=True #第一个epoch: load_dataset=True, 后续epoch为False #data_selector=None #shard_batch_itr=True if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.args.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, ) return self.task.get_batch_iterator( dataset=self.task.dataset(self.args.train_subset), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.args.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.args.required_batch_size_multiple, seed=self.args.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.args.num_workers, epoch=epoch, ) def get_valid_iterator( #生成了验证数据迭代器 --valid_subset='valid'控制 self, subset, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" return self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.args.max_tokens_valid, max_sentences=self.args.max_sentences_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.args.required_batch_size_multiple, seed=self.args.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.args.num_workers, ) def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous('begin_epoch') # wait for all workers xm.mark_step() @metrics.aggregate("train") def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" if self._dummy_batch == "DUMMY": self._dummy_batch = samples[0] self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 #打印samples长度,看看samples的长度是不是update_freq #print(len(samples)); #是update_freq for i, sample in enumerate(samples): #samples = [sample], 仅迭代一次 sample:tokens=3400 sample = self._prepare_sample(sample) #将数据搬到gpu上 if sample is None: #false # when sample is None, run forward/backward on a dummy batch # and ignore the resulting gradients sample = self._prepare_sample(self._dummy_batch) is_dummy_batch = True else: is_dummy_batch = False def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward #在这里用task调用了训练函数 #调用了! 
包括: 前向传播、反向梯度计算 loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.args.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: #false # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass import torch_xla.core.xla_model as xm xm.mark_step() if is_dummy_batch: #false if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0. if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas #print(self._sync_stats());exit() if self._sync_stats(): #false 应该是多卡数据并行才需要 train_time = self._local_cumulative_training_time() logging_outputs, (sample_size, ooms, total_train_time) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch, ) self._cumulative_training_time = total_train_time / self.data_parallel_world_size if hasattr(self.model, 'all_reduce'): self.model.all_reduce() overflow = False try: if self.tpu and self.data_parallel_world_size > 1: #false import torch_xla.core.xla_model as xm gradients = xm._fetch_gradients(self.optimizer.optimizer) xm.all_reduce('sum', gradients, scale=1.0 / self.data_parallel_world_size) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (# GPUs / sample_size) since DDP # already normalizes by the number of GPUs. Thus we get # (sum_of_gradients / sample_size). 
if not self.args.use_bmuf: #true self.optimizer.multiply_grads(self.data_parallel_world_size / sample_size) elif sample_size > 0: # BMUF needs to check sample size num = self.data_parallel_world_size if self._sync_stats() else 1 self.optimizer.multiply_grads(num / sample_size) with torch.autograd.profiler.record_function("clip-grads"): # clip grads # 进入执行, 对反向传播计算好的grad进行clip操作 grad_norm = self.clip_grad_norm(self.args.clip_norm) # check that grad norms are consistent across workers if ( not self.args.use_bmuf and self.args.distributed_wrapper != 'SlowMo' and not self.tpu ): self._check_grad_norms(grad_norm) #执行 with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.optimizer.step() #对参数进行梯度更新 except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails with NanDetector(self.model): self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False ) raise except OverflowError as e: overflow = True logger.info("NOTE: overflow detected, " + str(e)) grad_norm = torch.tensor(0.).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer after the step if hasattr(self.model, 'perform_additional_optimizer_actions'): #false if hasattr(self.optimizer, 'fp32_params'): self.model.perform_additional_optimizer_actions(self.optimizer.optimizer, self.optimizer.fp32_params) else: self.model.perform_additional_optimizer_actions(self.optimizer.optimizer) if not overflow or self.args.distributed_wrapper == 'SlowMo': #true self.set_num_updates(self.get_num_updates() + 1) if self.tpu: #false # mark step on TPUs import torch_xla.core.xla_model as xm xm.mark_step() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.args.log_interval == 0: logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm, ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: #执行 # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm, ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.args.empty_cache_freq > 0 and ( (self.get_num_updates() + self.args.empty_cache_freq - 1) % self.args.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.args.fp16: #false metrics.log_scalar( "loss_scale", self.optimizer.scaler.loss_scale, priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output @metrics.aggregate("valid") def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self._dummy_batch == "DUMMY": self._dummy_batch = sample if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous('valid_step') # wait for all workers xm.mark_step() with torch.no_grad(): self.model.eval() self.criterion.eval() sample = self._prepare_sample(sample) if sample is None: sample = self._prepare_sample(self._dummy_batch) is_dummy_batch = True else: is_dummy_batch = False try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( 
"ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0. # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_output def zero_grad(self): self.optimizer.zero_grad() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if 'get_meter' not in self._warn_once: self._warn_once.add('get_meter') utils.deprecation_warning( 'Trainer.get_meter is deprecated. Please use fairseq.metrics instead.' ) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_"):] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _prepare_sample(self, sample): if sample == "DUMMY": raise 
Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: return None if self.cuda: sample = utils.move_to_cuda(sample) def apply_half(t): if t.dtype is torch.float32: return t.half() return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.args.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.args.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.args.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.args.use_bmuf: return ( (self.get_num_updates() + 1) % self.args.global_sync_iter == 0 and (self.get_num_updates() + 1) > self.args.warmup_iterations ) else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list(zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.args, 'all_gather_list_size', 16384), group=self.data_parallel_process_group, ) )) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data['extra_stats_' + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data['logging_outputs_' + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data['extra_stats_' + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data['logging_outputs_' + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( not torch.isfinite(tensor).any() or (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all() ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format(pretty_detail) raise RuntimeError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=no_c10d. " "Or are you mixing up different generation of GPUs in training?" + "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None: metrics.log_speed("ups", 1., priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.args.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.args.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self, message=None): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: if message is None: message = ( "too many of these can lead to slow training, " "but we expect a few in the beginning" ) logging.info("NOTE: XLA compilation detected; {}".format(message)) self._num_xla_compiles = num_xla_compiles def _catalog_shared_params(module, memo=None, prefix=''): if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ('.' 
if prefix else '') + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ('.' if prefix else '') + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1] def _get_module_by_path(module, path): path = path.split('.') for name in path: module = getattr(module, name) return module def _set_module_by_path(module, path, value): path = path.split('.') for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
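# --- Illustrative sketch (not part of the original trainer.py) ------------------
# A hedged toy showing the shared-parameter helpers above on a module with tied
# weights; the Tied class and its names are assumptions for illustration only.
import torch.nn as nn

class Tied(nn.Module):
    def __init__(self):
        super().__init__()
        self.emb = nn.Embedding(10, 4)
        self.out = nn.Linear(4, 10, bias=False)
        self.out.weight = self.emb.weight        # weight tying

toy = Tied()
print(_catalog_shared_params(toy))               # [['emb.weight', 'out.weight']]
ref = _get_module_by_path(toy, 'emb.weight')
_set_module_by_path(toy, 'out.weight', ref)      # re-tie, e.g. after a device move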
41,387
37.608209
117
py
RegularizedBN
RegularizedBN-main/fairseq/modules/transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, Optional

import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import (
    LayerNorm,
    MultiheadAttention,
)
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules.fairseq_dropout import FairseqDropout


class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = 'relu',
        export: bool = False,
        q_noise: float = 0.0,
        qn_block_size: int = 8,
        init_fn: Callable = None,
    ) -> None:
        super().__init__()

        if init_fn is not None:
            init_fn()

        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
        self.activation_dropout_module = FairseqDropout(activation_dropout, module_name=self.__class__.__name__)

        # Initialize blocks
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = self.build_self_attention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            self_attention=True,
            q_noise=q_noise,
            qn_block_size=qn_block_size,
        )

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)

        self.fc1 = self.build_fc1(
            self.embedding_dim,
            ffn_embedding_dim,
            q_noise=q_noise,
            qn_block_size=qn_block_size,
        )
        self.fc2 = self.build_fc2(
            ffn_embedding_dim,
            self.embedding_dim,
            q_noise=q_noise,
            qn_block_size=qn_block_size,
        )

        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        return quant_noise(
            nn.Linear(input_dim, output_dim), q_noise, qn_block_size
        )

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        return quant_noise(
            nn.Linear(input_dim, output_dim), q_noise, qn_block_size
        )

    def build_self_attention(
        self,
        embed_dim,
        num_attention_heads,
        dropout,
        self_attention,
        q_noise,
        qn_block_size,
    ):
        return MultiheadAttention(
            embed_dim,
            num_attention_heads,
            dropout=dropout,
            self_attention=True,
            q_noise=q_noise,
            qn_block_size=qn_block_size,
        )

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        """
        residual = x
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = self.dropout_module(x)
        x = residual + x
        x = self.self_attn_layer_norm(x)

        residual = x
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = residual + x
        x = self.final_layer_norm(x)
        return x, attn
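# --- Illustrative sketch (not part of the original file) ------------------------
# A hedged usage example of the post-LN encoder layer defined above; the sizes
# and the padding pattern are assumptions for illustration only.
import torch

layer = TransformerSentenceEncoderLayer(
    embedding_dim=64, ffn_embedding_dim=128, num_attention_heads=4
)
x = torch.randn(10, 2, 64)                          # (seq_len, batch, embed_dim)
padding_mask = torch.zeros(2, 10, dtype=torch.bool)
padding_mask[1, 7:] = True                          # pad the tail of sentence 1
out, attn = layer(x, self_attn_padding_mask=padding_mask)
print(out.shape)                                    # torch.Size([10, 2, 64])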
4,160
28.721429
112
py
RegularizedBN
RegularizedBN-main/fairseq/modules/learned_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor


class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False
        if self.padding_idx is not None:
            self.max_positions = self.num_embeddings - self.padding_idx - 1
        else:
            self.max_positions = self.num_embeddings

    def forward(
        self,
        input: Tensor,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        positions: Optional[Tensor] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        assert (positions is None) or (
            self.padding_idx is None
        ), "If positions is pre-computed then padding_idx should not be set."

        if positions is None:
            if incremental_state is not None:
                # positions is the same for every token when decoding a single step
                # Without the int() cast, it doesn't work in some cases when exporting to ONNX
                positions = torch.zeros(
                    (1, 1), device=input.device, dtype=input.dtype
                ).fill_(int(self.padding_idx + input.size(1)))
            else:
                positions = utils.make_positions(
                    input, self.padding_idx, onnx_trace=self.onnx_trace
                )
        return F.embedding(
            positions,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
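# --- Illustrative sketch (not part of the original file) ------------------------
# A hedged example of the padding offset used above: positions are counted from
# padding_idx + 1, so only num_embeddings - padding_idx - 1 slots are usable.
# The sizes and token ids below are assumptions for illustration only.
import torch

pad = 1
pos_emb = LearnedPositionalEmbedding(num_embeddings=16, embedding_dim=8, padding_idx=pad)
print(pos_emb.max_positions)               # 16 - 1 - 1 = 14

tokens = torch.tensor([[5, 6, 7, pad]])    # right-padded batch of one sentence
out = pos_emb(tokens)                      # positions 2, 3, 4 and padding_idx for the pad
print(out.shape)                           # torch.Size([1, 4, 8])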
2,259
35.451613
94
py
RegularizedBN
RegularizedBN-main/fairseq/modules/sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from .multihead_attention import MultiheadAttention class SparseMultiheadAttention(MultiheadAttention): """ Sparse Multi-Headed Attention. "Generating Long Sequences with Sparse Transformers". Implements fixed factorized self attention, where l=stride and c=expressivity. A(1) includes all words in the stride window and A(2) takes a summary of c words from the end of each stride window. If is_bidirectional=False, we do not include any words past the current word, as in the paper. """ def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention ) self.is_bidirectional = is_bidirectional self.stride = stride self.expressivity = expressivity assert(self.stride > 0 and self.stride >= self.expressivity) # Used for Ai(2) calculations - beginning of [l-c, l] range def compute_checkpoint(self, word_index): if word_index % self.stride == 0 and word_index != 0: checkpoint_index = word_index - self.expressivity else: checkpoint_index = ( math.floor(word_index / self.stride) * self.stride + self.stride - self.expressivity ) return checkpoint_index # Computes Ai(2) def compute_subset_summaries(self, absolute_max): checkpoint_index = self.compute_checkpoint(0) subset_two = set() while checkpoint_index <= absolute_max-1: summary = set(range(checkpoint_index, min( checkpoint_index+self.expressivity+1, absolute_max) )) subset_two = subset_two.union(summary) checkpoint_index = self.compute_checkpoint(checkpoint_index+self.stride) return subset_two # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf def compute_fixed_attention_subset(self, word_index, tgt_len): # +1s account for range function; [min, max) -> [min, max] if not self.is_bidirectional: absolute_max = word_index + 1 else: absolute_max = tgt_len # Subset 1 - whole window rounded_index = math.floor((word_index + self.stride) / self.stride) * self.stride if word_index % self.stride == 0 and word_index != 0: subset_one = set(range(word_index-self.stride, min(absolute_max, word_index+1))) else: subset_one = set(range(max(0, rounded_index - self.stride), min( absolute_max, rounded_index+1)) ) # Subset 2 - summary per window # If bidirectional, subset 2 is the same for every index subset_two = set() if not self.is_bidirectional: subset_two = self.compute_subset_summaries(absolute_max) return subset_one.union(subset_two) # Compute sparse mask - if bidirectional, can pre-compute and store def buffered_sparse_mask(self, tensor, tgt_len, src_len): assert(tgt_len > self.stride) sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf')) # If bidirectional, subset 2 is the same for every index subset_summaries = set() if self.is_bidirectional: subset_summaries = self.compute_subset_summaries(tgt_len) for i in range(tgt_len): fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) fixed_attention_subset = fixed_attention_subset.union(subset_summaries) included_word_indices = torch.LongTensor(list(fixed_attention_subset)) sparse_mask[i].index_fill_(0, included_word_indices, 0) return 
sparse_mask.type_as(tensor) def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) sparse_mask = sparse_mask.unsqueeze(0).expand(bsz * self.num_heads, tgt_len, src_len) attn_weights += sparse_mask
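# --- Illustrative sketch (not part of the original file) ------------------------
# A hedged, standalone re-implementation of the fixed factorized pattern above
# (compute_checkpoint / compute_subset_summaries / compute_fixed_attention_subset),
# so the attended positions can be inspected without building the full attention
# module. The stride/expressivity values are assumptions for illustration only.
import math

def fixed_pattern(word_index, tgt_len, stride=4, expressivity=2, bidirectional=False):
    def checkpoint(i):
        if i % stride == 0 and i != 0:
            return i - expressivity
        return math.floor(i / stride) * stride + stride - expressivity

    absolute_max = tgt_len if bidirectional else word_index + 1
    rounded = math.floor((word_index + stride) / stride) * stride
    if word_index % stride == 0 and word_index != 0:
        subset_one = set(range(word_index - stride, min(absolute_max, word_index + 1)))
    else:
        subset_one = set(range(max(0, rounded - stride), min(absolute_max, rounded + 1)))
    subset_two = set()
    if not bidirectional:
        c = checkpoint(0)
        while c <= absolute_max - 1:
            subset_two |= set(range(c, min(c + expressivity + 1, absolute_max)))
            c = checkpoint(c + stride)
    return sorted(subset_one | subset_two)

print(fixed_pattern(word_index=9, tgt_len=12))  # -> [2, 3, 4, 6, 7, 8, 9]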
4,525
42.104762
100
py
RegularizedBN
RegularizedBN-main/fairseq/modules/statistics_utils.py
import numpy as np import torch from scipy import io #save: save statistics in a file #record: only record def save_residual_proportion(self, x, residual, module): # T,B,C assert module in ['att','ffn'], "wrong module in residual proportion!" if not self.training or not self.record_residual_proportion: return file = self.record_residual_att_file if module=='att' else self.record_residual_ffn_file if self.step%self.save_interval==0: self.x_proportion[file].append(x[:,0,:].norm()) self.residual_proportion[file].append(residual[:,0,:].norm()) self.total_proportion[file].append((x+residual)[:,0,:].norm()) if(self.step%self.save_interval==0): savefile = '{}_{}.mat'.format(file,self.step//self.save_interval) d = {} d['x'] = np.array(self.x_proportion[file]) d['residual'] = np.array(self.residual_proportion[file]) d['total'] = np.array(self.total_proportion[file]) self.x_proportion[file] = [] self.residual_proportion[file] = [] self.total_proportion[file] = [] io.savemat(savefile,d) def get_singular_values(cov, eps=1e-5): #input: covariance matrix C*C #output: singular values in increasing order: C C,_ = cov.shape cov += eps*torch.eye(C).cuda() #for numerical stability s, _ = torch.symeig(cov) return s def record_forward_weight_norm(self): if not self.training or not self.record_weight_norm: return if self.step%self.record_interval==0: w1, w2 = self.fc1.weight.data, self.fc2.weight.data self.w1_norm.append(w1.norm()) self.w2_norm.append(w2.norm()) cov1 = torch.matmul(w1.transpose(0,1),w1) cov2 = torch.matmul(w2,w2.transpose(0,1)) s1 = get_singular_values(cov1) s2 = get_singular_values(cov2) self.w_singular_value[self.record_w1_grad_file].append(s1.reshape([1,-1])) self.w_singular_value[self.record_w2_grad_file].append(s2.reshape([1,-1])) def save_forward_backward_weight_norm(self,savefile): w1_norm = torch.tensor(self.w1_norm).cpu().numpy() w2_norm = torch.tensor(self.w2_norm).cpu().numpy() w1_grad_norm = torch.tensor(self.w_grad_norm[self.record_w1_grad_file]).cpu().numpy() w2_grad_norm = torch.tensor(self.w_grad_norm[self.record_w2_grad_file]).cpu().numpy() w1_singular_value = torch.cat(self.w_singular_value[self.record_w1_grad_file],dim=0).cpu().numpy() w2_singular_value = torch.cat(self.w_singular_value[self.record_w2_grad_file],dim=0).cpu().numpy() d = {} d['w1_norm'] = w1_norm d['w2_norm'] = w2_norm d['w1_grad_norm'] = w1_grad_norm d['w2_grad_norm'] = w2_grad_norm d['w1_singular_value'] = w1_singular_value d['w2_singular_value'] = w2_singular_value file = "{}_{}.mat".format(savefile, self.step//self.save_interval) io.savemat(file,d) self.w1_norm = [] self.w2_norm = [] self.w_grad_norm[self.record_w1_grad_file] = [] self.w_grad_norm[self.record_w2_grad_file] = [] self.w_singular_value[self.record_w1_grad_file] = [] self.w_singular_value[self.record_w2_grad_file] = [] def save_attn_weights(self, attn_weights): #B*T*T(tgt_len, src_len) if not self.training or not self.record_attn_weight: return if self.step%self.save_interval==0: attn_weights = attn_weights[0] x_len = self.mask[0].sum() x_len = x_len.int() attn_weights = attn_weights[:x_len,:x_len] #要注意是left-pad还是right-pad bug! 
attn_weights = attn_weights.data.cpu().numpy() self.attn_weight_list.append(attn_weights) if(self.step%self.save_interval==0): file = '{}_{}.mat'.format(self.attn_weight_savefile,self.step//self.save_interval) d = {} d['attn_weight'] = np.array(self.attn_weight_list) self.attn_weight_list = [] io.savemat(file,d) def dominated_word(self, x, position, process): #x: T,B,C copyx = x copyx = copyx.transpose(0,1) #B,T,C norms = copyx.norm(dim=-1) #B,T self.d_norm[process][position].append(norms.norm().cpu().numpy().item()) #dominated word v, _ = norms.max(dim=-1,keepdim=True) #B,1 norms = norms/v mean = norms.mean(dim=-1) #B index = torch.argmax(norms,dim=-1,keepdim=True) #B,1 #求argmax还不够,还要满足第一与第二的比值足够大 利用topk函数 word = self.src_tokens.gather(dim=-1,index=index) value2, _ = torch.topk(norms,k=2) #B,2 self.d_dominated_word[process][position].append(word.cpu().numpy().reshape([-1,])) self.d_dominated_index[process][position].append(index.cpu().numpy().reshape([-1,])) self.d_dominated_r[process][position].append(mean.cpu().numpy().reshape([-1,])) self.d_dominated_top2_value[process][position].append(value2.cpu().numpy()) def zero_rate(self, x, position, process): T,B,C = x.shape copyx = x num_tokens = self.mask.sum() num_pads = B*T*C-num_tokens*C copyx = copyx*(self.mask.transpose(0,1)) num_zeros = (copyx==0).sum() num_zero_neurous = (((copyx==0).sum(dim=0))==T).sum() num_zero_words = (((copyx==0).sum(dim=-1))==C).sum() num_zero_words = num_zero_words-(B*T-num_tokens) num_pads = num_pads.type_as(num_zeros) num_zeros = num_zeros-num_pads num_zeros = num_zeros.cpu().numpy().item() num_tokens = num_tokens.cpu().numpy().item() num_zero_neurous = num_zero_neurous.cpu().numpy().item() num_zero_words = num_zero_words.cpu().numpy().item() r = num_zeros/num_tokens/C r_neuron = num_zero_neurous/B/C r_word = num_zero_words/num_tokens self.d_zero_rate[process][position].append([r, r_neuron, r_word]) def norm_distribution(self, x, position, process): #print(copyx.shape) copyx = x.transpose(0,1) #B,T,C items = min(copyx.shape[0],2) for i in range(items): temx = copyx[i] #T,C len_x = self.mask[i].sum() len_x = len_x.int() temx = temx[:len_x] #len_x, C bag = torch.zeros(self.max_len) bag[:len_x] = torch.norm(temx,dim=1) bag[-1] = len_x.float() self.d_norm_distribution[process][position].append(bag.reshape([1,-1])) #一维数组 def save_norm_statistics(self, x, position, process): #T,B,C if len(self.record_parts[process])==0: return if 'norm_distribution' in self.norm_items[process]: norm_distribution(self, x, position, process) if 'dominated_word' in self.norm_items[process]: dominated_word(self, x, position, process) if 'zero_rate' in self.norm_items[process]: zero_rate(self, x, position, process) if(self.step%self.save_interval==0): d = {} if 'norm_distribution' in self.norm_items[process]: save_norm_distribution = torch.cat(self.d_norm_distribution[process][position],dim=0).cpu().numpy() d['norm_distribution'] = save_norm_distribution self.d_norm_distribution[process][position] = [] if 'dominated_word' in self.norm_items[process]: save_dominated_word = np.concatenate(self.d_dominated_word[process][position]) save_dominated_index = np.concatenate(self.d_dominated_index[process][position]) save_dominated_r = np.concatenate(self.d_dominated_r[process][position]) save_dominated_top2_value = np.concatenate(self.d_dominated_top2_value[process][position],axis=0) save_norm = np.array(self.d_norm[process][position]) d['dominated_word'] = save_dominated_word d['dominated_index'] = save_dominated_index d['dominated_r'] = 
save_dominated_r d['dominated_top2_value'] = save_dominated_top2_value d['norm'] = save_norm self.d_dominated_word[process][position] = [] self.d_dominated_index[process][position] = [] self.d_dominated_r[process][position] = [] self.d_dominated_top2_value[process][position] = [] self.d_norm[process][position] = [] if 'zero_rate' in self.norm_items[process]: r_list = [i[0] for i in self.d_zero_rate[process][position]] r_neuron_list = [i[1] for i in self.d_zero_rate[process][position]] r_word_list = [i[2] for i in self.d_zero_rate[process][position]] save_zero_rate = np.array(r_list) save_zero_neuron_rate = np.array(r_neuron_list) save_zero_word_rate = np.array(r_word_list) d['zero_rate'] = save_zero_rate d['zero_neuron_rate'] = save_zero_neuron_rate d['zero_word_rate'] = save_zero_word_rate self.d_zero_rate[process][position] = [] file = "{}_{}.mat".format(self.d_savefile[process][position],self.step//self.save_interval) io.savemat(file,d) def save_condition_statistics(self, x, position, process): #T,B,C if len(self.condition_items[process])==0: return eps = 1e-5 T, B, C = x.shape x_len = self.mask.sum(dim=(1,2)) #B r = torch.zeros(1).cuda() c_max = torch.zeros(1).cuda() c_20 = torch.zeros(1).cuda() c_50 = torch.zeros(1).cuda() c_80 = torch.zeros(1).cuda() r_total = torch.zeros(1).cuda() intra_average_sim = torch.zeros(1).cuda() inter_average_sim = torch.zeros(1).cuda() total_average_sim = torch.zeros(1).cuda() s = torch.zeros(1).cuda() if 'r' in self.condition_items[process]: rx = x.transpose(0,1) #B,T,C #cov_r = torch.bmm(rx.transpose(1,2),rx) #B,C,C iters = min(rx.shape[0],1) for i in range(iters): feature = rx[i] #T,C cov_r = torch.matmul(feature.transpose(0,1),feature)/x_len[i] cov_r += torch.eye(C).cuda() #for numerical stability try: s, _ = torch.symeig(cov_r) #返回为增序特征值 特征值分解很费时间 except: print("eigenvalue decomposition error when computing stable rank! 
") print(cov_r) print(feature[:,1].sum()) print(cov_r.shape) print(x_len[i]) #print(s) exit() s = torch.ones(cov_r.size(1)).cuda() temr = torch.sum(s)/s[-1] #square of singular values r += temr r = r/iters #r = r.cpu().numpy().item() #start_time = time.time() if 'c_max' in self.condition_items[process] or 'r_total' in self.condition_items[process]: yx = x.transpose(0,1) #B,T,C y = yx.reshape(-1,yx.shape[-1]) #(B*T)*C #y = y[index] #去掉pad word cov = torch.matmul(y.transpose(0,1),y)/y.shape[0] #C*C C=512 try: s, _ = torch.symeig(cov) #返回为增序特征值 特征值分解很费时间 except: print("eigenvalue decomposition error!") s = torch.ones(cov.size(1)).cuda() r_total = (torch.sum(s)/s[-1]) #square of singular values c_max = s[-1] c_20 = (c_max/s[len(s)*4//5]) c_50 = (c_max/s[len(s)//2]) c_80 = (c_max/s[len(s)*1//5]) #print("cov time: {}".format(time.time() - start_time)) #start_time = time.time() if 'intra_average_sim' in self.condition_items[process]: sx = x.transpose(0,1) #B,T,C sim_items = B//4 sx = sx[:sim_items] y = sx.reshape(-1,sx.shape[-1]) #(sim_items*T)*C y = y/(y.norm(dim=-1,keepdim=True)+eps) sim = torch.matmul(y, y.transpose(0,1)) #sim_items*T,sim_items*T #print("sim 1 time: {}".format(time.time() - start_time)) #start_time = time.time() intra_sim = 0 items_len = x_len[:sim_items].reshape([1,-1]) for i in range(sim_items): temsim = torch.sum(sim[T*i:T*(i+1),T*i:T*(i+1)]) intra_sim += temsim dim = torch.sum(items_len) inter_sim = torch.sum(sim)-intra_sim intra_sim -= dim total_average_sim = inter_sim+intra_sim intra_items = torch.sum(items_len**2)-torch.sum(items_len) total_items = dim*dim-dim inter_items = total_items-intra_items intra_average_sim = intra_sim/intra_items inter_average_sim = inter_sim/inter_items total_average_sim = total_average_sim/total_items #print("sim 2 time: {}".format(time.time() - start_time)) #start_time = time.time() collect_items = [c_max, c_20, c_50, c_80, r_total, r, intra_average_sim, inter_average_sim, total_average_sim] self.d_condition_number[process][position].append(collect_items) self.d_condition_singular_value[process][position].append(s.reshape([1,-1])) if self.step%self.save_interval==0: d = {} for i, name in enumerate(self.condition_items[process]): d[name] = np.array([b[i].cpu().numpy().item() for b in self.d_condition_number[process][position]]) singular_value = torch.cat(self.d_condition_singular_value[process][position],dim=0).cpu().numpy() d['condition_singular_value'] = singular_value[::10] #取1/10即可 self.d_condition_number[process][position] = [] self.d_condition_singular_value[process][position] = [] file = "{}_{}.mat".format(self.d_condition_savefile[process][position],self.step//self.save_interval) io.savemat(file,d) def probe_hook(self, position, process): def hook_template(x): copyx = x.data.clone() copyx = copyx*self.mask.transpose(0,1) #x can be forward features or backward gradient if 'norm' in self.record_parts[process]: save_norm_statistics(self, copyx, position, process) if 'condition' in self.record_parts[process]: save_condition_statistics(self, copyx, position, process) return hook_template def insert_forward_probe(self, x, position): return if self.record_process['forward'] and position in self.probe_positions["forward"]: cur_probe_hook = probe_hook(self, position, process="forward") cur_probe_hook(x) def insert_backward_probe(self, x, position): return if self.record_process['backward'] and position in self.probe_positions["backward"]: cur_probe_hook = probe_hook(self, position, process="backward") x.register_hook(cur_probe_hook) def 
insert_probe_unify(self, x, position, process):
    assert process in ['forward', 'backward'], "wrong process(insert_probe_unify)"
    if self.record_process[process] and position in self.probe_positions[process]:
        cur_probe_hook = probe_hook(self, position, process=process)
        if process=='forward':
            cur_probe_hook(x)
        else:
            x.register_hook(cur_probe_hook)

def insert_probe(self, x, position):
    #include both the forward probe and the backward probe
    if not self.training or self.step%self.record_interval!=0:
        return
    insert_probe_unify(self, x, position, process='forward')
    insert_probe_unify(self, x, position, process='backward')
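The probe helpers above expect to be called from a module that already defines self.mask, self.step, self.save_interval, self.probe_positions and the various record_* switches (see statistics_init.py later in this dump). As a rough, self-contained sketch of the underlying mechanism only — all names below are illustrative and not part of this repo's API — a forward probe reads a statistic directly off the activation, while a backward probe registers a tensor hook so the same statistic is read off the gradient during backprop.

import torch

def demo_probe(x, tag):
    """Print a statistic of x now (forward) and of its gradient later (backward)."""
    def backward_hook(grad):
        # the same statistic, computed on the gradient that flows back through x
        print(f"[{tag}] backward norm: {grad.norm().item():.4f}")
    print(f"[{tag}] forward norm: {x.norm().item():.4f}")
    x.register_hook(backward_hook)

if __name__ == "__main__":
    x = torch.randn(5, 2, 8, requires_grad=True)   # (T, B, C), the layout used above
    h = x * 2.0                                    # stand-in for a sub-layer output
    demo_probe(h, "ffn_input")
    h.sum().backward()                             # triggers the backward probe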
14971
42.397101
115
py
RegularizedBN
RegularizedBN-main/fairseq/modules/multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Parameter from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise #from fairseq.modules import CWN @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ __count = 0 def update_cnt(self): MultiheadAttention.__count += 1 def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, penalty=0, normalize_q=False, normalize_k=False, normalize_v=False, g0=10, fix_g0=False, ): super().__init__() self.id = self.__count #对每个attention设定唯一标识 self.update_cnt() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention self.penalty = penalty self.normalize_q = normalize_q self.normalize_k = normalize_k self.normalize_v = normalize_v self.fix_g0 = fix_g0 self.eps = 1e-5 if self.normalize_q or self.normalize_k: self.g0 = Parameter(torch.Tensor(1)) nn.init.constant_(self.g0, g0) if self.fix_g0: self.g0 = self.g0.cuda().detach() assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size) self.v_proj = quant_noise(nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size) self.q_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size) #self.k_proj = CWN(self.kdim, embed_dim, NScale = -1, adjustScale=1) #self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) #self.q_proj = CWN(embed_dim, embed_dim, NScale = -1, adjustScale=1) self.out_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() #注意在使用CWN时要去掉!!! 
self.onnx_trace = False self.tpu = False def prepare_for_onnx_export_(self): self.onnx_trace = True def prepare_for_tpu_(self, **kwargs): self.tpu = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) #weight与weight_v一样 nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def orth_loss(self, attention_score): #感觉计算量太大了 #需要额外的mask,mask住的位置可能为-inf,不能直接乘 #代码不完善 B, T, _ = attention_score.shape cov = torch.bmm(attention_score.transpose(1,2),attention_score) cur_loss = 0 #print("orth loss:",cur_loss) return cur_loss def norm_irrelevant_attention_weight(self,q,k): #q: B, tgt_len, head_dim #k: B, src_len, head_dim q = q/(q.norm(dim=-1,keepdim=True)+self.eps) k = k/(k.norm(dim=-1,keepdim=True)+self.eps) attn_weights = torch.bmm(q,k.transpose(1,2)) return attn_weights def loss(self): loss = 0 assert self.training==True, "wrongly adding attention loss!" if self.self_attention and self.id<6 and self.penalty>0: loss += self.penalty*orth_loss(self.attention_score) return loss def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if ( not self.onnx_trace and not self.tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() ): #cross:false static_kv=true assert key is not None and value is not None #print("here");exit() #indeed here! 
#a = torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)) #print("a") #内部dropout是把weight设为0而不是-inf; 如果是作用在softmax之后的weight则没问题。 ''' return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), a, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) ''' q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) if not self.normalize_q and not self.normalize_k: q *= self.scaling q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) src_len = k.size(1) if (self.self_attention and self.id<6): if self.normalize_q: q = q/(q.norm(dim=-1,keepdim=True)+self.eps) q *= self.g0 if self.normalize_k: k = k/(k.norm(dim=-1,keepdim=True)+self.eps) if self.normalize_v: v = v/(v.norm(dim=-1,keepdim=True)+self.eps) attn_weights = torch.bmm(q, k.transpose(1, 2)) if attn_mask is not None: #print(attn_mask);print(attn_mask.shape);print(attn_weights.shape);exit() #attn_mask = attn_mask.repeat(attn_weights.size(0),1,1) attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) self.attention_score = attn_weights #for addtional loss attn_probs = self.dropout_module(attn_weights) attn = torch.bmm(attn_probs, v) attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights #print("here");exit() #here if incremental_state is not None: #cross:false saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: #cross: true # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: #false assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) 
if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: #cross: false # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.dim() == 0: #cross:false key_padding_mask = None if key_padding_mask is not None: #cross:true(unnecessary code) assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn:#cross:false assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) #nothing done assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: #cross: false attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: #cross: true # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not self.tpu: #true attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf')) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: #false return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: #cross: false # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], 
dim=1 ) elif key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
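A minimal usage sketch for the class above, assuming this repo's fairseq package is importable; the toy shapes are illustrative and inputs follow the Time x Batch x Channel convention documented in forward(). With the default flags (no q/k/v normalization) the module runs on CPU.

import torch
from fairseq.modules.multihead_attention import MultiheadAttention

if __name__ == "__main__":
    tgt_len, bsz, embed_dim, heads = 7, 2, 16, 4
    attn = MultiheadAttention(embed_dim, heads, self_attention=True)
    x = torch.randn(tgt_len, bsz, embed_dim)
    pad = torch.zeros(bsz, tgt_len, dtype=torch.bool)
    pad[1, -2:] = True                       # last two positions of sentence 1 are padding
    out, weights = attn(x, x, x, key_padding_mask=pad, need_weights=True)
    print(out.shape)                         # torch.Size([7, 2, 16])
    print(weights.shape)                     # torch.Size([2, 7, 7]), averaged over heads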
23984
40.070205
110
py
RegularizedBN
RegularizedBN-main/fairseq/modules/transpose_last.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ transpose last 2 dimensions of the input """ import torch.nn as nn class TransposeLast(nn.Module): def __init__(self, deconstruct_idx=None): super().__init__() self.deconstruct_idx = deconstruct_idx def forward(self, x): if self.deconstruct_idx is not None: x = x[self.deconstruct_idx] return x.transpose(-2, -1)
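A small illustrative example (toy shapes assumed): TransposeLast is handy inside nn.Sequential for switching between (B, T, C) and (B, C, T) layouts around convolutions.

import torch
from torch import nn

if __name__ == "__main__":
    block = nn.Sequential(
        TransposeLast(),                      # (B, T, C) -> (B, C, T) for Conv1d
        nn.Conv1d(8, 8, kernel_size=3, padding=1),
        TransposeLast(),                      # back to (B, T, C)
    )
    x = torch.randn(4, 10, 8)
    print(block(x).shape)                     # torch.Size([4, 10, 8])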
550
25.238095
65
py
RegularizedBN
RegularizedBN-main/fairseq/modules/norm_select.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .norm.mask_layernorm3d import MaskLayerNorm3d from .norm.mask_batchnorm3d import MaskBatchNorm3d from .norm.mask_powernorm3d import MaskPowerNorm3d from .norm.mask_groupnorm import GroupNorm from .norm.mask_groupscale import MaskGroupScale from .norm.mask_identity import MaskIdentityNorm from torch import nn def parse_norm(norm): args = norm.split("_") return args def NormSelect(norm_type, embed_dim, layer_id=-1, prefix='None'): #print(norm_type) args = parse_norm(norm_type) norm_type = args[0] if norm_type == "layer": if len(args)==1: print("nn.layernorm") return nn.LayerNorm(embed_dim) #return MaskLayerNorm3d(embed_dim) elif len(args)==2: return MaskLayerNorm3d(embed_dim, affine=int(args[1])) else: return MaskLayerNorm3d(embed_dim, affine=int(args[1]), square=int(args[2])) elif norm_type == "identity": return MaskIdentityNorm(embed_dim) elif norm_type == "group": #assert len(args)==6, "wrong groupnorm argument!" return GroupNorm(embed_dim, num_groups=int(args[1])) #return GroupNorm(embed_dim, num_groups=int(args[1]), affine=int(args[2]) ,subtract_type=args[3], robust_mean=int(args[4]),robust_std=int(args[5])) elif norm_type == "power": if args[-1]=='select': if args[-2].find(str(layer_id))>=0: return MaskPowerNorm3d(embed_dim, prefix=prefix, penalty_var=float(args[1])) else: return MaskLayerNorm3d(embed_dim, affine=1) if len(args)==1: return MaskPowerNorm3d(embed_dim, prefix=prefix) elif len(args)==2: return MaskPowerNorm3d(embed_dim, prefix=prefix, penalty_var=float(args[1])) elif norm_type == "batch": # return MaskBatchNorm(embed_dim) if len(args)==3: return MaskBatchNorm3d(embed_dim, affine=int(args[1]), with_seq=int(args[2]), prefix=prefix) elif len(args)==4: return MaskBatchNorm3d(embed_dim, penalty_type=args[1], penalty_mean=float(args[2]), penalty_var=float(args[3]), prefix=prefix) else: print("error len BN!") elif norm_type == "groupscale": print("groupscale") return MaskGroupScale(embed_dim, group_num=8) else: print("error NormSelect!") exit() #elif norm_type == 'power': # return MaskPowerNorm(embed_dim, group_num=head_num, warmup_iters=warmup_updates)
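An illustrative sketch of the norm_type string grammar parsed above (underscore-separated fields); it assumes this repo's fairseq package and its .norm submodules are importable, and the chosen specs are only examples.

if __name__ == "__main__":
    embed_dim = 512
    ln = NormSelect("layer", embed_dim)        # plain nn.LayerNorm
    idn = NormSelect("identity", embed_dim)    # pass-through normalization
    gn = NormSelect("group_8", embed_dim)      # GroupNorm with 8 groups
    # Other specs parsed above include "layer_1" (affine MaskLayerNorm3d),
    # "power_0.01" (MaskPowerNorm3d with a variance penalty) and
    # "batch_1_1" / "batch_<type>_<mean>_<var>" (MaskBatchNorm3d variants).
    print(type(ln).__name__, type(idn).__name__, type(gn).__name__)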
2685
40.323077
155
py
RegularizedBN
RegularizedBN-main/fairseq/modules/same_pad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch import nn class SamePad(nn.Module): def __init__(self, kernel_size): super().__init__() self.remove = kernel_size % 2 == 0 def forward(self, x): if self.remove: x = x[:, :, :-1] return x
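A quick illustration (toy shapes assumed): SamePad is placed after a Conv1d that uses padding=kernel_size // 2, so that even kernel sizes still preserve the sequence length.

import torch
from torch import nn

if __name__ == "__main__":
    k = 4                                      # even kernel: conv padding adds one extra step
    conv = nn.Sequential(nn.Conv1d(8, 8, k, padding=k // 2), SamePad(k))
    x = torch.randn(2, 8, 50)                  # (B, C, T)
    print(conv(x).shape)                       # torch.Size([2, 8, 50])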
432
21.789474
65
py
RegularizedBN
RegularizedBN-main/fairseq/modules/multihead_attention_simple.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Parameter #from fairseq.modules import CWN class RelativeEmbedding(nn.Module): def forward(self, input): """Input is expected to be of size [bsz x seqlen]. """ bsz, seq_len = input.size() max_pos = self.padding_idx + seq_len if max_pos > self.origin_shift: #将embedding个数设多一点,不要出现这种情况 # recompute/expand embeddings if needed weights = self.get_embedding( max_pos*2, self.embedding_dim, self.padding_idx, ) weights = weights.to(self._float_tensor) del self.weights self.origin_shift = weights.size(0)//2 self.register_buffer('weights', weights) positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len embed = self.weights.index_select(0, positions.long()).detach() return embed class RelativeSinusoidalPositionalEmbedding(RelativeEmbedding): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1536): """ :param embedding_dim: 每个位置的dimension :param padding_idx: :param init_size: """ super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx assert init_size%2==0 weights = self.get_embedding( init_size+1, embedding_dim, padding_idx, ) self.register_buffer('weights', weights) self.register_buffer('_float_tensor', torch.FloatTensor(1)) def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 self.origin_shift = num_embeddings//2 + 1 return emb class MultiheadAttentionSimple(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. 
""" __count = 0 def update_cnt(self): MultiheadAttentionSimple.__count += 1 def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, penalty=0, normalize_q=False, normalize_k=False, normalize_v=False, g0=10, fix_g0=False, ): super().__init__() self.id = self.__count #对每个attention设定唯一标识 self.update_cnt() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = nn.Dropout(dropout) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = True self.penalty = penalty self.normalize_q = normalize_q self.normalize_k = normalize_k self.normalize_v = normalize_v self.fix_g0 = fix_g0 self.eps = 1e-5 if self.normalize_q or self.normalize_k: self.g0 = Parameter(torch.Tensor(1)) nn.init.constant_(self.g0, g0) if self.fix_g0: self.g0 = self.g0.cuda().detach() assert self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias) self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) #self.k_proj = CWN(self.kdim, embed_dim, NScale = -1, adjustScale=1) #self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) #self.q_proj = CWN(embed_dim, embed_dim, NScale = -1, adjustScale=1) self.out_proj =nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() #注意在使用CWN时要去掉!!! self.pos_embed = RelativeSinusoidalPositionalEmbedding(self.head_dim, 0, 1200) self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, self.head_dim))) self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, self.head_dim))) def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) #weight与weight_v一样 nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def orth_loss(self, attention_score): #感觉计算量太大了 #需要额外的mask,mask住的位置可能为-inf,不能直接乘 #代码不完善 B, T, _ = attention_score.shape cov = torch.bmm(attention_score.transpose(1,2),attention_score) cur_loss = 0 #print("orth loss:",cur_loss) return cur_loss def norm_irrelevant_attention_weight(self,q,k): #q: B, tgt_len, head_dim #k: B, src_len, head_dim q = q/(q.norm(dim=-1,keepdim=True)+self.eps) k = k/(k.norm(dim=-1,keepdim=True)+self.eps) attn_weights = torch.bmm(q,k.transpose(1,2)) return attn_weights def loss(self): loss = 0 assert self.training==True, "wrongly adding attention loss!" 
if self.self_attention and self.id<6 and self.penalty>0: loss += self.penalty*self.orth_loss(self.attention_score) return loss def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) if not self.normalize_q and not self.normalize_k: q *= self.scaling q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d src_len = k.size(1) if self.id<6: if self.normalize_q: q = q/(q.norm(dim=-1,keepdim=True)+self.eps) q *= self.g0 if self.normalize_k: k = k/(k.norm(dim=-1,keepdim=True)+self.eps) if self.normalize_v: v = v/(v.norm(dim=-1,keepdim=True)+self.eps) attn_weights = torch.bmm(q, k.transpose(1, 2)) pos_embed = self.pos_embed(key_padding_mask) # l x head_dim D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None] # head x 2max_len, 每个head对位置的bias B_ = torch.einsum('bnqd,ld->bnql', q.view(bsz, self.num_heads, tgt_len, self.head_dim), pos_embed) # bsz x head x max_len x 2max_len,每个query对每个shift的偏移 E_ = torch.einsum('bnqd,ld->bnql', k.view(bsz, self.num_heads, tgt_len, self.head_dim), pos_embed) # bsz x head x max_len x 2max_len, key对relative的bias BD = B_ + D_ # bsz x head x max_len x 2max_len, 要转换为bsz x head x max_len x max_len BDE = self._shift(BD) + self._transpose_shift(E_) BDE = BDE.contiguous().view(bsz * self.num_heads, tgt_len, tgt_len) attn_weights += BDE*self.scaling if attn_mask is not None: #print(attn_mask);print(attn_mask.shape);print(attn_weights.shape);exit() #attn_mask = attn_mask.repeat(attn_weights.size(0),1,1) attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32) attn_weights = attn_weights_float.type_as(attn_weights) self.attention_score = attn_weights #for 
addtional loss attn_probs = self.dropout_module(attn_weights) attn = torch.bmm(attn_probs, v) attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights def _shift(self, BD): """ 类似 -3 -2 -1 0 1 2 -3 -2 -1 0 1 2 -3 -2 -1 0 1 2 转换为 0 1 2 -1 0 1 -2 -1 0 :param BD: batch_size x n_head x max_len x 2max_len :return: batch_size x n_head x max_len x max_len """ bsz, n_head, max_len, _ = BD.size() zero_pad = BD.new_zeros(bsz, n_head, max_len, 1) BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len BD = BD[:, :, :, max_len:] return BD def _transpose_shift(self, E): """ 类似 -3 -2 -1 0 1 2 -30 -20 -10 00 10 20 -300 -200 -100 000 100 200 转换为 0 -10 -200 1 00 -100 2 10 000 :param E: batch_size x n_head x max_len x 2max_len :return: batch_size x n_head x max_len x max_len """ bsz, n_head, max_len, _ = E.size() zero_pad = E.new_zeros(bsz, n_head, max_len, 1) # bsz x n_head x -1 x (max_len+1) E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) indice = (torch.arange(max_len)*2+1).to(E.device) E = E.index_select(index=indice, dim=-2).transpose(-1,-2) # bsz x n_head x max_len x max_len return E
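A minimal usage sketch with toy shapes (assumed values): this variant always adds relative sinusoidal position biases, and the key_padding_mask is also used to size the position embeddings, so it must be provided even when nothing is padded.

import torch

if __name__ == "__main__":
    T, B, C, H = 9, 2, 16, 4
    attn = MultiheadAttentionSimple(C, H)
    x = torch.randn(T, B, C)
    pad = torch.zeros(B, T, dtype=torch.bool)        # no padded positions in this toy batch
    out, weights = attn(x, x, x, key_padding_mask=pad)
    print(out.shape, weights.shape)                  # (9, 2, 16) and (2, 9, 9)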
13929
37.480663
161
py
RegularizedBN
RegularizedBN-main/fairseq/modules/linearized_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from .conv_tbc import ConvTBC from fairseq.incremental_decoding_utils import with_incremental_state @with_incremental_state class LinearizedConvolution(ConvTBC): """An optimized version of nn.Conv1d. At training time, this module uses ConvTBC, which is an optimized version of Conv1d. At inference time, it optimizes incremental generation (i.e., one time step at a time) by replacing the convolutions with linear layers. Note that the input order changes from training to inference. """ def __init__(self, in_channels, out_channels, kernel_size, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self._linearized_weight = None self.register_backward_hook(self._clear_linearized_weight) def state_dict(self, destination=None, prefix='', keep_vars=False): state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) # don't store redundant _linearized_weight in checkpoints if prefix + '_linearized_weight' in state: del state[prefix + '_linearized_weight'] return state def upgrade_state_dict_named(self, state_dict, name): prefix = name + '.' if name != '' else '' if prefix + '_linearized_weight' in state_dict: del state_dict[prefix + '_linearized_weight'] def forward(self, input, incremental_state=None): """ Args: incremental_state: Used to buffer signal; if not None, then input is expected to contain a single frame. If the input order changes between time steps, call reorder_incremental_state. Input: Time x Batch x Channel during training Batch x Time x Channel during inference """ if incremental_state is None: output = super().forward(input) if self.kernel_size[0] > 1 and self.padding[0] > 0: # remove future timesteps added by padding output = output[:-self.padding[0], :, :] return output # reshape weight weight = self._get_linearized_weight() kw = self.kernel_size[0] bsz = input.size(0) # input: bsz x len x dim if kw > 1: input = input.data input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = input.new(bsz, kw, input.size(2)).zero_() self._set_input_buffer(incremental_state, input_buffer) else: # shift buffer input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() # append next input input_buffer[:, -1, :] = input[:, -1, :] input = input_buffer with torch.no_grad(): output = F.linear(input.view(bsz, -1), weight, self.bias) return output.view(bsz, 1, -1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def _get_linearized_weight(self): if self._linearized_weight is None: kw = self.kernel_size[0] weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() assert weight.size() == (self.out_channels, kw, self.in_channels) self._linearized_weight = torch.nn.Parameter(weight.view(self.out_channels, -1)) return self._linearized_weight def _clear_linearized_weight(self, *args): 
self._linearized_weight = None
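A rough usage sketch, assuming fairseq is importable (ConvTBC wraps torch.conv_tbc); the sizes and the plain-dict incremental_state are illustrative. At training time the layer consumes Time x Batch x Channel and trims the future steps introduced by padding; at inference it can be fed one frame at a time.

import torch

if __name__ == "__main__":
    conv = LinearizedConvolution(8, 8, kernel_size=3, padding=2)
    x = torch.randn(10, 2, 8)                  # training: (T, B, C)
    print(conv(x).shape)                       # torch.Size([10, 2, 8]), future steps trimmed

    state = {}                                 # incremental decoding: one frame at a time
    frame = torch.randn(2, 1, 8)               # (B, 1, C)
    print(conv(frame, incremental_state=state).shape)   # torch.Size([2, 1, 8])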
4,261
41.19802
95
py
RegularizedBN
RegularizedBN-main/fairseq/modules/downsampled_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.scalar_bias import scalar_bias from fairseq.modules.fairseq_dropout import FairseqDropout class SingleHeadAttention(nn.Module): """ Single-head attention that supports Gating and Downsampling """ def __init__( self, out_channels, embed_dim, head_dim, head_index, dropout=0., bias=True, project_input=True, gated=False, downsample=False, num_heads=1, ): super().__init__() self.embed_dim = embed_dim self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__) self.head_index = head_index self.head_dim = head_dim self.project_input = project_input self.gated = gated self.downsample = downsample self.num_heads = num_heads self.projection = None k_layers = [] v_layers = [] if self.downsample: k_layers.append(Downsample(self.head_index)) v_layers.append(Downsample(self.head_index)) out_proj_size = self.head_dim else: out_proj_size = self.head_dim * self.num_heads if self.gated: k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) else: k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_k = nn.Sequential(*k_layers) self.in_proj_v = nn.Sequential(*v_layers) if self.downsample: self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias) else: self.out_proj = Linear(out_proj_size, out_channels, bias=bias) self.scaling = self.head_dim**-0.5 def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" src_len, bsz, out_channels = key.size() tgt_len = query.size(0) assert list(query.size()) == [tgt_len, bsz, out_channels] assert key.size() == value.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.downsample: size = bsz else: size = bsz * self.num_heads k = key v = value q = query if self.project_input: q = self.in_proj_q(q) k = self.in_proj_k(k) v = self.in_proj_v(v) src_len = k.size()[0] q *= self.scaling if not self.downsample: q = q.view(tgt_len, size, self.head_dim) k = k.view(src_len, size, self.head_dim) v = v.view(src_len, size, self.head_dim) q = q.transpose(0, 1) k = k.transpose(0, 1) v = v.transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) if mask_future_timesteps: assert query.size() == key.size(), \ 'mask_future_timesteps only applies to self-attention' attn_weights *= torch.tril( attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(), diagonal=-1, )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0) attn_weights += torch.triu( attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(), diagonal=0 )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0) tgt_size = tgt_len if use_scalar_bias: attn_weights = scalar_bias(attn_weights, 2) v = scalar_bias(v, 1) tgt_size += 1 if key_padding_mask is not None: # don't attend to padding symbols if key_padding_mask.max() > 0: if self.downsample: attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len) else: attn_weights = attn_weights.view(size, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -math.inf, ) attn_weights = attn_weights.view(size, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout_module(attn_weights) attn = torch.bmm(attn_weights, v) if self.downsample: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) return attn, attn_weights class DownsampledMultiHeadAttention(nn.ModuleList): """ Multi-headed attention with Gating and Downsampling """ def __init__( self, out_channels, embed_dim, num_heads, dropout=0., bias=True, project_input=True, gated=False, downsample=False, ): self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.downsample = downsample self.gated = gated self.project_input = project_input assert self.head_dim * num_heads == embed_dim if self.downsample: attention_heads = [] for index in range(self.num_heads): attention_heads.append( SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, index, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) ) super().__init__(modules=attention_heads) self.out_proj = Linear(embed_dim, out_channels, bias=bias) else: # either we have a list of attention heads, or just one attention head # if not being downsampled, we can do the heads with one linear layer instead of separate ones super().__init__() self.attention_module = SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, 1, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): src_len, bsz, embed_dim = key.size() tgt_len = query.size(0) assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, 
bsz, embed_dim] assert key.size() == value.size() tgt_size = tgt_len if use_scalar_bias: tgt_size += 1 attn = [] attn_weights = [] if self.downsample: for attention_head_number in range(self.num_heads): # call the forward of each attention head _attn, _attn_weight = self[attention_head_number]( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn = self.out_proj(full_attn) return full_attn, attn_weights[0].clone() else: _attn, _attn_weight = self.attention_module( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn_weights = torch.cat(attn_weights) full_attn_weights = full_attn_weights.view(bsz, self.num_heads, tgt_size, src_len) full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads return full_attn, full_attn_weights class Downsample(nn.Module): """ Selects every nth element, where n is the index """ def __init__(self, index): super().__init__() self.index = index def forward(self, x): return x[::self.index+1] def Linear(in_features, out_features, dropout=0., bias=True): """Weight-normalized Linear layer (input: B x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return nn.utils.weight_norm(m) def GatedLinear(in_features, out_features, dropout=0., bias=True): """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units""" return nn.Sequential( Linear(in_features, out_features*4, dropout, bias), nn.GLU(), Linear(out_features*2, out_features*2, dropout, bias), nn.GLU(), Linear(out_features, out_features, dropout, bias) )
9,863
37.381323
106
py
RegularizedBN
RegularizedBN-main/fairseq/modules/multihead_attention_relative.py
import torch from torch import nn import torch.nn.functional as F import math class RelativeEmbedding(nn.Module): def forward(self, input): """Input is expected to be of size [bsz x seqlen]. """ bsz, seq_len = input.size() max_pos = self.padding_idx + seq_len if max_pos > self.origin_shift: #将embedding个数设多一点,不要出现这种情况 # recompute/expand embeddings if needed weights = self.get_embedding( max_pos*2, self.embedding_dim, self.padding_idx, ) weights = weights.to(self._float_tensor) del self.weights self.origin_shift = weights.size(0)//2 self.register_buffer('weights', weights) positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len embed = self.weights.index_select(0, positions.long()).detach() return embed class RelativeSinusoidalPositionalEmbedding(RelativeEmbedding): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1536): """ :param embedding_dim: 每个位置的dimension :param padding_idx: :param init_size: """ super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx assert init_size%2==0 weights = self.get_embedding( init_size+1, embedding_dim, padding_idx, ) self.register_buffer('weights', weights) self.register_buffer('_float_tensor', torch.FloatTensor(1)) def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 self.origin_shift = num_embeddings//2 + 1 return emb class RelativeMultiHeadAttn(nn.Module): def __init__(self, d_model, n_head, dropout=0.0, r_w_bias=None, r_r_bias=None, scale=True): """ :param int d_model: :param int n_head: :param dropout: 对attention map的dropout :param r_w_bias: n_head x head_dim or None, 如果为dim :param r_r_bias: n_head x head_dim or None, :param scale: :param rel_pos_embed: """ super().__init__() self.qkv_linear = nn.Linear(d_model, d_model * 3, bias=False) self.n_head = n_head self.head_dim = d_model // n_head self.dropout_layer = nn.Dropout(dropout) self.pos_embed = RelativeSinusoidalPositionalEmbedding(d_model//n_head, 0, 1200) if scale: self.scale = math.sqrt(d_model // n_head) else: self.scale = 1 if r_r_bias is None or r_w_bias is None: # Biases are not shared self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head))) self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head))) else: self.r_r_bias = r_r_bias # r_r_bias就是v self.r_w_bias = r_w_bias # r_w_bias就是u def forward(self, x, mask): """ :param x: batch_size x max_len x d_model :param mask: batch_size x max_len :return: """ batch_size, max_len, d_model = x.size() pos_embed = self.pos_embed(mask) # l x head_dim qkv = self.qkv_linear(x) # batch_size x max_len x d_model3 q, k, v = torch.chunk(qkv, chunks=3, dim=-1) q = q.view(batch_size, max_len, self.n_head, -1).transpose(1, 2) k = k.view(batch_size, 
max_len, self.n_head, -1).transpose(1, 2) v = v.view(batch_size, max_len, self.n_head, -1).transpose(1, 2) # b x n x l x d rw_head_q = q + self.r_r_bias[:, None] AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q, k]) # b x n x l x d, n是head D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None] # head x 2max_len, 每个head对位置的bias B_ = torch.einsum('bnqd,ld->bnql', q, pos_embed) # bsz x head x max_len x 2max_len,每个query对每个shift的偏移 E_ = torch.einsum('bnqd,ld->bnql', k, pos_embed) # bsz x head x max_len x 2max_len, key对relative的bias BD = B_ + D_ # bsz x head x max_len x 2max_len, 要转换为bsz x head x max_len x max_len BDE = self._shift(BD) + self._transpose_shift(E_) attn = AC + BDE attn = attn / self.scale attn = attn.masked_fill(mask[:, None, None, :].eq(1), float('-inf')) attn = F.softmax(attn, dim=-1) attn = self.dropout_layer(attn) v = torch.matmul(attn, v).transpose(1, 2).reshape(batch_size, max_len, d_model) # b x n x l x d return v def _shift(self, BD): """ 类似 -3 -2 -1 0 1 2 -3 -2 -1 0 1 2 -3 -2 -1 0 1 2 转换为 0 1 2 -1 0 1 -2 -1 0 :param BD: batch_size x n_head x max_len x 2max_len :return: batch_size x n_head x max_len x max_len """ bsz, n_head, max_len, _ = BD.size() zero_pad = BD.new_zeros(bsz, n_head, max_len, 1) BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len BD = BD[:, :, :, max_len:] return BD def _transpose_shift(self, E): """ 类似 -3 -2 -1 0 1 2 -30 -20 -10 00 10 20 -300 -200 -100 000 100 200 转换为 0 -10 -200 1 00 -100 2 10 000 :param E: batch_size x n_head x max_len x 2max_len :return: batch_size x n_head x max_len x max_len """ bsz, n_head, max_len, _ = E.size() zero_pad = E.new_zeros(bsz, n_head, max_len, 1) # bsz x n_head x -1 x (max_len+1) E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) indice = (torch.arange(max_len)*2+1).to(E.device) E = E.index_select(index=indice, dim=-2).transpose(-1,-2) # bsz x n_head x max_len x max_len return E
6,946
35.952128
118
py
RegularizedBN
RegularizedBN-main/fairseq/modules/quant_noise.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert module.weight.size(1) % block_size == 0, "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert module.in_channels % block_size == 0, "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros(in_features // block_size * out_features, device=weight.device) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device) mask.bernoulli_(p) mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) # scale weights and apply mask mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module
3,666
39.296703
110
py
RegularizedBN
RegularizedBN-main/fairseq/modules/gelu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with the corresponding GitHub repo: https://github.com/hendrycks/GELUs """ import math import torch import torch.nn as nn def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x)
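A short numerical check (assumed inputs) comparing the exact GELU wrapped by `gelu` with the tanh approximation in `gelu_accurate`; over a moderate input range the two agree to within about 1e-3.

import torch

x = torch.linspace(-3, 3, steps=7)
print(gelu(x))
print(gelu_accurate(x))
print(torch.allclose(gelu(x), gelu_accurate(x), atol=1e-3))  # True: the tanh form closely tracks the exact erf form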
706
26.192308
87
py
RegularizedBN
RegularizedBN-main/fairseq/modules/statistics_init.py
import numpy as np import torch from scipy import io from .statistics_utils import save_forward_backward_weight_norm #need to define self.prefix, self.id before initializing statistics def init_residual_proportion(self, args): self.record_residual_proportion = args.record_residual_proportion if self.record_residual_proportion: self.record_residual_att_file = 'statistics/{}/residual_att_{}'.format(self.prefix,self.id) self.record_residual_ffn_file = 'statistics/{}/residual_ffn_{}'.format(self.prefix,self.id) self.x_proportion = {} self.residual_proportion = {} self.total_proportion = {} files = [self.record_residual_att_file, self.record_residual_ffn_file] for file in files: self.x_proportion[file] = [] self.residual_proportion[file] = [] self.total_proportion[file] = [] def backward_hook_weight(self, savefile): def backward_hook_template(grad): if self.training and self.step%self.save_interval==0: self.w_grad_norm[savefile].append(grad.norm()) if(self.step%self.save_interval==0 and savefile.find("w1_grad")>=0): save_forward_backward_weight_norm(self, self.save_weight_file) return backward_hook_template def init_forward_backward_weight_norm(self,args): self.record_weight_norm = args.record_weight_norm if self.record_weight_norm: self.record_w1_grad_file = 'statistics/{}/w1_grad_norm_{}'.format(self.prefix,self.id) self.record_w2_grad_file = 'statistics/{}/w2_grad_norm_{}'.format(self.prefix,self.id) self.save_weight_file = 'statistics/{}/weight_grad_norm_{}'.format(self.prefix,self.id) self.w1_norm = [] self.w2_norm = [] self.w_grad_norm = {} self.w_singular_value = {} self.w_grad_norm[self.record_w1_grad_file] = [] self.w_grad_norm[self.record_w2_grad_file] = [] self.w_singular_value[self.record_w1_grad_file] = [] self.w_singular_value[self.record_w2_grad_file] = [] self.fc1.weight.register_hook(backward_hook_weight(self, self.record_w1_grad_file)) self.fc2.weight.register_hook(backward_hook_weight(self, self.record_w2_grad_file)) def init_attn_weight(self, args): self.record_attn_weight = args.record_attn_weight if self.record_attn_weight: self.attn_weight_list = [] self.attn_weight_savefile = 'statistics/{}/attn_weight_{}'.format(self.prefix, self.id) def init_probe_norm(self, args, process): assert process in ['forward', 'backward'] self.d_savefile[process] = {} self.d_norm[process]= {} self.d_norm_distribution[process] = {} self.d_dominated_word[process] = {} self.d_dominated_index[process] = {} self.d_dominated_top2_value[process] = {} self.d_dominated_r[process] = {} self.d_zero_rate[process] = {} #use position to mark everything for position in self.probe_positions[process]: savefile = 'statistics/{}/{}_norm_{}_{}'.format(self.prefix, process, position, self.id) self.d_savefile[process][position] = savefile self.d_norm[process][position] = [] self.d_norm_distribution[process][position] = [] self.d_dominated_word[process][position]= [] self.d_dominated_index[process][position] = [] self.d_dominated_top2_value[process][position] = [] self.d_dominated_r[process][position] = [] self.d_zero_rate[process][position] = [] def init_probe_condition(self, args, process): assert process in ['forward', 'backward'] self.d_condition_savefile[process] = {} self.d_condition_number[process] = {} self.d_condition_singular_value[process] = {} #use position to mark everything for position in self.probe_positions[process]: savefile = 'statistics/{}/{}_condition_{}_{}'.format(self.prefix, process, position, self.id) self.d_condition_savefile[process][position] = savefile 
self.d_condition_number[process][position] = [] self.d_condition_singular_value[process][position] = [] def init_probe_statistics(self, args, process): assert process in ['forward', 'backward'] def add_prefix(cur_list, prefix): cur_list = ["{}_{}".format(prefix, item) for item in cur_list] return cur_list #self.candidate_positions[process] = add_prefix(self.all_positions, process) #self.candidate_parts[process] = add_prefix(self.all_parts, process) #self.candidate_norm_items[process] = add_prefix(self.all_norm_items, process) #self.candidate_condition_items[process] = add_prefix(self.all_condition_items, process) #"norm" includes: input_norm_distribution, dominated word(include input_norm), zero_rate # probe_position = args.forward_probe_position if process=='forward' else args.backward_probe_position record_parts = args.forward_record_parts if process=='forward' else args.backward_record_parts record_norm_items = args.forward_record_norm_items if process=='forward' else args.backward_record_norm_items record_condition_items = args.forward_record_condition_items if process=='forward' else args.backward_record_condition_items if probe_position=='all': self.probe_positions[process] = self.all_positions else: self.probe_positions[process] = (probe_position).split(',') if probe_position!='none' else [] if record_parts=='all': self.record_parts[process] = self.all_parts else: self.record_parts[process] = (record_parts).split(',') if record_parts!='none' else [] if record_norm_items=='all': self.norm_items[process] = self.all_norm_items else: self.norm_items[process] = (record_norm_items).split(',') if record_norm_items!='none' else [] if record_condition_items=='all': self.condition_items[process] = self.all_condition_items else: self.condition_items[process] = (record_condition_items).split(',') if record_condition_items!='none' else [] self.record_process[process] = 0 if probe_position=='none' or record_parts=='none' else 1 if not self.record_process[process]: return init_probe_norm(self, args, process) init_probe_condition(self, args, process) def init_all_statistics(self,args): #forward part init_residual_proportion(self, args) init_forward_backward_weight_norm(self,args) init_attn_weight(self, args) init_probe_statistics(self, args, "forward") #backward statistics are exactly the same as forward statistics #backward part init_probe_statistics(self, args, "backward") def init_base_dictionary(self,args): #condition self.d_condition_savefile = {} self.d_condition_number = {} self.d_condition_singular_value = {} #others self.d_savefile = {} self.d_norm = {} self.d_norm_distribution = {} self.d_dominated_word = {} self.d_dominated_index = {} self.d_dominated_top2_value = {} self.d_dominated_r = {} self.d_zero_rate = {} # self.probe_positions = {} self.record_parts = {} self.norm_items = {} self.condition_items = {} # self.all_positions = ['att_input','att_output','att_norm_input','ffn_input','ffn_output','ffn_norm_input','before_relu','after_relu'] self.all_parts = ['norm','condition'] self.all_norm_items = ['norm_distribution','dominated_word','zero_rate'] self.all_condition_items = ['c_max', 'c_20', 'c_50', 'c_80', 'r_total', 'r', 'intra_average_sim', 'inter_average_sim', 'total_average_sim'] self.candidate_positions = {} self.candidate_parts = {} self.candidate_norm_items = {} self.candidate_condition_items = {} # self.record_process = {} def init_config(self, args): #initialize configuration for saving statistics def extract_id(s): cand = s.split('/') for c in cand: 
if(c.find('transformer')>=0): return c print("error path!") exit() return 'error' self.prefix = extract_id(args.save_dir) self.step = 0 self.save_interval = 1100 #standard: 1100 self.record_interval = 50 #standard: 20 assert self.save_interval%self.record_interval==0, "wrong interval!" self.max_len = 600 #self.d_statistics = {} #for saving forward & backward statistics init_base_dictionary(self,args) init_all_statistics(self,args)
8,439
43.188482
144
py
RegularizedBN
RegularizedBN-main/fairseq/modules/positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from .learned_positional_embedding import LearnedPositionalEmbedding from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding def PositionalEmbedding( num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool = False, ): if learned: # if padding_idx is specified then offset the embedding ids by # this index and adjust num_embeddings appropriately # TODO: The right place for this offset would be inside # LearnedPositionalEmbedding. Move this there for a cleaner implementation. if padding_idx is not None: num_embeddings = num_embeddings + padding_idx + 1 m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) if padding_idx is not None: nn.init.constant_(m.weight[padding_idx], 0) else: m = SinusoidalPositionalEmbedding( embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, ) return m
1,286
38
83
py
RegularizedBN
RegularizedBN-main/fairseq/modules/fairseq_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Optional import torch.nn as nn import torch.nn.functional as F logger = logging.getLogger(__name__) class FairseqDropout(nn.Module): def __init__(self, p, module_name=None): super().__init__() self.p = p self.module_name = module_name self.apply_during_inference = False def forward(self, x, inplace: bool = False): if self.training or self.apply_during_inference: return F.dropout(x, p=self.p, training=True, inplace=inplace) else: return x def make_generation_fast_( self, name: str, retain_dropout: bool = False, retain_dropout_modules: Optional[List[str]] = None, **kwargs ): if retain_dropout: if retain_dropout_modules is not None and self.module_name is None: logger.warning( 'Cannot enable dropout during inference for module {} ' 'because module_name was not set'.format(name) ) elif ( retain_dropout_modules is None # if None, apply to all modules or self.module_name in retain_dropout_modules ): logger.info( 'Enabling dropout during inference for module: {}'.format(name) ) self.apply_during_inference = True else: logger.info('Disabling dropout for module: {}'.format(name))
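A brief sketch (the module name below is an arbitrary label) of the two behaviours this wrapper adds over plain dropout: it is skipped at evaluation time by default, and `make_generation_fast_` can re-enable it at inference for selected module names.

import torch

drop = FairseqDropout(p=0.5, module_name="demo_layer")
x = torch.ones(2, 4)

drop.train()
print(drop(x))        # roughly half the entries are zeroed, the survivors are scaled by 2

drop.eval()
print(drop(x))        # unchanged: dropout is skipped at inference by default

# opt back in to dropout at inference for this named module
drop.make_generation_fast_("demo_layer", retain_dropout=True, retain_dropout_modules=["demo_layer"])
print(drop(x))        # dropout is applied again even though the module is in eval mode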
1,687
30.849057
83
py
RegularizedBN
RegularizedBN-main/fairseq/modules/cross_entropy.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch import torch.nn.functional as F logger = logging.getLogger(__name__) def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction='mean'): lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) return F.nll_loss( lprobs, target, ignore_index=ignore_index, reduction=reduction, ) try: import xentropy_cuda from apex.contrib import xentropy logger.info('using fused cross entropy') def cross_entropy(logits, target, ignore_index=-100, reduction='mean'): if logits.device == torch.device('cpu'): return _cross_entropy_pytorch(logits, target, ignore_index, reduction) else: half_to_float = (logits.dtype == torch.half) losses = xentropy.SoftmaxCrossEntropyLoss.apply( logits, target, 0.0, ignore_index, half_to_float, ) if reduction == 'sum': return losses.sum() elif reduction == 'mean': if ignore_index >= 0: return losses.sum() / target.ne(ignore_index).sum() else: return losses.mean() elif reduction == 'none': return losses else: raise NotImplementedError except ImportError: def cross_entropy(logits, target, ignore_index=-100, reduction='mean'): return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
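A small sketch (assumed shapes) of the CPU fallback path: log-softmax plus NLL loss, with `ignore_index` excluding padding targets from the reduction.

import torch

logits = torch.randn(8, 100)               # 8 tokens, vocabulary of 100
target = torch.randint(0, 100, (8,))
target[0] = 1                              # pretend index 1 is the padding symbol

loss = cross_entropy(logits, target, ignore_index=1, reduction='mean')
print(loss.item())                         # padding positions do not contribute to the mean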
1,650
30.75
82
py
RegularizedBN
RegularizedBN-main/fairseq/modules/adaptive_input.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from fairseq.modules.quant_noise import quant_noise from typing import List class AdaptiveInput(nn.Module): def __init__( self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int], q_noise: float = 0, qn_block_size: int = 8, ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert vocab_size == cutoff[ -1], 'cannot specify cutoff larger than vocab size' self.cutoff = cutoff self.embedding_dim = output_dim self.padding_idx = padding_idx self.embeddings = nn.ModuleList() for i in range(len(self.cutoff)): prev = self.cutoff[i - 1] if i > 0 else 0 size = self.cutoff[i] - prev dim = int(initial_dim // (factor ** i)) seq = nn.Sequential( nn.Embedding(size, dim, self.padding_idx), quant_noise(nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size), ) self.embeddings.append(seq) self.padding_idx = None self.padding_idx = padding_idx def init_weights(m): if isinstance(m, nn.Embedding): nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) elif hasattr(m, 'weight'): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer('_float_tensor', torch.FloatTensor(1)) def weights_for_band(self, band: int): return self.embeddings[band][0].weight, self.embeddings[band][1].weight def forward(self, input: torch.Tensor): result = self._float_tensor.new(input.shape + (self.embedding_dim,)) for i in range(len(self.cutoff)): mask = input.lt(self.cutoff[i]) if i > 0: mask.mul_(input.ge(self.cutoff[i - 1])) chunk_input = input[mask] - self.cutoff[i - 1] else: chunk_input = input[mask] if mask.any(): result[mask] = self.embeddings[i](chunk_input) return result
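A usage sketch with assumed sizes: a 10k-word vocabulary split at the given cutoffs into three bands, where rarer bands use smaller embedding dimensions (scaled down by `factor`) before being projected back to `output_dim`.

import torch

emb = AdaptiveInput(
    vocab_size=10000, padding_idx=1, initial_dim=512,
    factor=4.0, output_dim=512, cutoff=[2000, 6000],
)
tokens = torch.randint(0, 10000, (2, 7))   # batch of 2 sentences, 7 tokens each
out = emb(tokens)
print(out.shape)                           # torch.Size([2, 7, 512])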
2,514
30.835443
92
py
RegularizedBN
RegularizedBN-main/fairseq/modules/gumbel_vector_quantizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class GumbelVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, temp, groups, combine_groups, vq_dim, time_first, activation=nn.GELU(), weight_proj_depth=1, weight_proj_factor=1, ): """Vector quantization using gumbel softmax Args: dim: input dimension (channels) num_vars: number of quantized vectors per group temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor) groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1 weight_proj_depth: number of layers (with activation in between) to project input before computing logits weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of projections by this factor """ super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.time_first = time_first assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim)) nn.init.uniform_(self.vars) if weight_proj_depth > 1: def block(input_dim, output_dim): return nn.Sequential(nn.Linear(input_dim, output_dim), activation) inner_dim = self.input_dim * weight_proj_factor self.weight_proj = nn.Sequential( *[ block(self.input_dim if i == 0 else inner_dim, inner_dim) for i in range(weight_proj_depth - 1) ], nn.Linear(inner_dim, groups * num_vars), ) else: self.weight_proj = nn.Linear(self.input_dim, groups * num_vars) nn.init.normal_(self.weight_proj.weight, mean=0, std=1) nn.init.zeros_(self.weight_proj.bias) assert len(temp) == 3, temp self.max_temp, self.min_temp, self.temp_decay = temp self.curr_temp = self.max_temp self.codebook_indices = None def set_num_updates(self, num_updates): self.curr_temp = max( self.max_temp * self.temp_decay ** num_updates, self.min_temp ) def get_codebook_indices(self): if self.codebook_indices is None: from itertools import product p = [range(self.num_vars)] * self.groups inds = list(product(*p)) self.codebook_indices = torch.tensor( inds, dtype=torch.long, device=self.vars.device ).flatten() if not self.combine_groups: self.codebook_indices = self.codebook_indices.view( self.num_vars ** self.groups, -1 ) for b in range(1, self.groups): self.codebook_indices[:, b] += self.num_vars * b self.codebook_indices = self.codebook_indices.flatten() return self.codebook_indices def codebook(self): indices = self.get_codebook_indices() return ( self.vars.squeeze(0) .index_select(0, indices) .view(self.num_vars ** self.groups, -1) ) def sample_from_codebook(self, b, n): indices = self.get_codebook_indices() indices = indices.view(-1, self.groups) cb_size = indices.size(0) assert ( n < cb_size ), f"sample size {n} is greater than size of codebook {cb_size}" sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,)) indices = indices[sample_idx] z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, 
-1) return z def to_codebook_index(self, indices): res = indices.new_full(indices.shape[:-1], 0) for i in range(self.groups): exponent = self.groups - i - 1 res += indices[..., i] * (self.num_vars ** exponent) return res def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars * self.groups} if not self.time_first: x = x.transpose(1, 2) bsz, tsz, fsz = x.shape x = x.reshape(-1, fsz) x = self.weight_proj(x) x = x.view(bsz * tsz * self.groups, -1) _, k = x.max(-1) hard_x = ( x.new_zeros(*x.shape) .scatter_(-1, k.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() avg_probs = torch.softmax( x.view(bsz * tsz, self.groups, -1).float(), dim=-1 ).mean(dim=0) result["prob_perplexity"] = torch.exp( -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1) ).sum() result["temp"] = self.curr_temp if self.training: x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x) else: x = hard_x x = x.view(bsz * tsz, -1) vars = self.vars if self.combine_groups: vars = vars.repeat(1, self.groups, 1) if produce_targets: result["targets"] = ( x.view(bsz * tsz * self.groups, -1) .argmax(dim=-1) .view(bsz, tsz, self.groups) .detach() ) x = x.unsqueeze(-1) * vars x = x.view(bsz * tsz, self.groups, self.num_vars, -1) x = x.sum(-2) x = x.view(bsz, tsz, -1) if not self.time_first: x = x.transpose(1, 2) # BTC -> BCT result["x"] = x return result
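A usage sketch (all sizes and the temperature schedule are assumptions): quantizing a batch of `B x T x C` features with two codebook groups and reading the quantized output and per-group code indices from the returned dictionary.

import torch

quantizer = GumbelVectorQuantizer(
    dim=256, num_vars=320, temp=(2.0, 0.5, 0.999995),
    groups=2, combine_groups=False, vq_dim=256, time_first=True,
)
features = torch.randn(4, 50, 256)          # B x T x C because time_first=True
out = quantizer(features, produce_targets=True)
print(out["x"].shape)                       # torch.Size([4, 50, 256]): quantized features
print(out["targets"].shape)                 # torch.Size([4, 50, 2]): one codebook index per group
print(out["num_vars"], out["temp"])         # 640 entries in total and the current Gumbel temperature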
6,792
33.135678
117
py
RegularizedBN
RegularizedBN-main/fairseq/modules/vggblock.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals from collections.abc import Iterable from itertools import repeat import torch import torch.nn as nn def _pair(v): if isinstance(v, Iterable): assert len(v) == 2, "len(v) != 2" return v return tuple(repeat(v, 2)) def infer_conv_output_dim(conv_op, input_dim, sample_inchannel): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim) # N x C x H x W # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim x = conv_op(x) # N x C x H x W x = x.transpose(1, 2) # N x H x C x W bsz, seq = x.size()[:2] per_channel_dim = x.size()[3] # bsz: N, seq: H, CxW the rest return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim class VGGBlock(torch.nn.Module): """ VGG motibated cnn module https://arxiv.org/pdf/1409.1556.pdf Args: in_channels: (int) number of input channels (typically 1) out_channels: (int) number of output channels conv_kernel_size: convolution channels pooling_kernel_size: the size of the pooling window to take a max over num_conv_layers: (int) number of convolution layers input_dim: (int) input dimension conv_stride: the stride of the convolving kernel. Can be a single number or a tuple (sH, sW) Default: 1 padding: implicit paddings on both sides of the input. Can be a single number or a tuple (padH, padW). Default: None layer_norm: (bool) if layer norm is going to be applied. Default: False Shape: Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features) Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features) """ def __init__( self, in_channels, out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, input_dim, conv_stride=1, padding=None, layer_norm=False, ): assert ( input_dim is not None ), "Need input_dim for LayerNorm and infer_conv_output_dim" super(VGGBlock, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.conv_kernel_size = _pair(conv_kernel_size) self.pooling_kernel_size = _pair(pooling_kernel_size) self.num_conv_layers = num_conv_layers self.padding = ( tuple(e // 2 for e in self.conv_kernel_size) if padding is None else _pair(padding) ) self.conv_stride = _pair(conv_stride) self.layers = nn.ModuleList() for layer in range(num_conv_layers): conv_op = nn.Conv2d( in_channels if layer == 0 else out_channels, out_channels, self.conv_kernel_size, stride=self.conv_stride, padding=self.padding, ) self.layers.append(conv_op) if layer_norm: conv_output_dim, per_channel_dim = infer_conv_output_dim( conv_op, input_dim, in_channels if layer == 0 else out_channels ) self.layers.append(nn.LayerNorm(per_channel_dim)) input_dim = per_channel_dim self.layers.append(nn.ReLU()) if self.pooling_kernel_size is not None: pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True) self.layers.append(pool_op) self.total_output_dim, self.output_dim = infer_conv_output_dim( pool_op, input_dim, out_channels ) def forward(self, x): for i, _ in enumerate(self.layers): x = self.layers[i](x) return x
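A usage sketch (assumed sizes, e.g. 80-dimensional filterbank features): two 3x3 convolutions with layer norm followed by 2x2 max pooling halve both the time and feature axes.

import torch

block = VGGBlock(
    in_channels=1, out_channels=32, conv_kernel_size=3,
    pooling_kernel_size=2, num_conv_layers=2, input_dim=80, layer_norm=True,
)
x = torch.randn(8, 1, 100, 80)              # B x C x T x feat
y = block(x)
print(y.shape)                              # torch.Size([8, 32, 50, 40]) after 2x2 max pooling
print(block.output_dim, block.total_output_dim)  # per-channel feature dim (40) and flattened channels x features dim (1280)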
4,057
33.683761
88
py
RegularizedBN
RegularizedBN-main/fairseq/modules/character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Tuple import torch from torch import nn import torch.nn.functional as F from fairseq.data import Dictionary CHAR_PAD_IDX = 0 CHAR_EOS_IDX = 257 logger = logging.getLogger(__name__) class CharacterTokenEmbedder(torch.nn.Module): def __init__( self, vocab: Dictionary, filters: List[Tuple[int, int]], char_embed_dim: int, word_embed_dim: int, highway_layers: int, max_char_len: int = 50, char_inputs: bool = False ): super(CharacterTokenEmbedder, self).__init__() self.onnx_trace = False self.embedding_dim = word_embed_dim self.max_char_len = max_char_len self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0) self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim)) self.eos_idx, self.unk_idx = 0, 1 self.char_inputs = char_inputs self.convolutions = nn.ModuleList() for width, out_c in filters: self.convolutions.append( nn.Conv1d(char_embed_dim, out_c, kernel_size=width) ) last_dim = sum(f[1] for f in filters) self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None self.projection = nn.Linear(last_dim, word_embed_dim) assert vocab is not None or char_inputs, "vocab must be set if not using char inputs" self.vocab = None if vocab is not None: self.set_vocab(vocab, max_char_len) self.reset_parameters() def prepare_for_onnx_export_(self): self.onnx_trace = True def set_vocab(self, vocab, max_char_len): word_to_char = torch.LongTensor(len(vocab), max_char_len) truncated = 0 for i in range(len(vocab)): if i < vocab.nspecial: char_idxs = [0] * max_char_len else: chars = vocab[i].encode() # +1 for padding char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars)) if len(char_idxs) > max_char_len: truncated += 1 char_idxs = char_idxs[:max_char_len] word_to_char[i] = torch.LongTensor(char_idxs) if truncated > 0: logger.info('truncated {} words longer than {} characters'.format(truncated, max_char_len)) self.vocab = vocab self.word_to_char = word_to_char @property def padding_idx(self): return Dictionary().pad() if self.vocab is None else self.vocab.pad() def reset_parameters(self): nn.init.xavier_normal_(self.char_embeddings.weight) nn.init.xavier_normal_(self.symbol_embeddings) nn.init.xavier_uniform_(self.projection.weight) nn.init.constant_(self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.) nn.init.constant_(self.projection.bias, 0.) 
def forward( self, input: torch.Tensor, ): if self.char_inputs: chars = input.view(-1, self.max_char_len) pads = chars[:, 0].eq(CHAR_PAD_IDX) eos = chars[:, 0].eq(CHAR_EOS_IDX) if eos.any(): if self.onnx_trace: chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars) else: chars[eos] = 0 unk = None else: flat_words = input.view(-1) chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as(input) pads = flat_words.eq(self.vocab.pad()) eos = flat_words.eq(self.vocab.eos()) unk = flat_words.eq(self.vocab.unk()) word_embs = self._convolve(chars) if self.onnx_trace: if pads.any(): word_embs = torch.where(pads.unsqueeze(1), word_embs.new_zeros(1), word_embs) if eos.any(): word_embs = torch.where(eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs) if unk is not None and unk.any(): word_embs = torch.where(unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs) else: if pads.any(): word_embs[pads] = 0 if eos.any(): word_embs[eos] = self.symbol_embeddings[self.eos_idx] if unk is not None and unk.any(): word_embs[unk] = self.symbol_embeddings[self.unk_idx] return word_embs.view(input.size()[:2] + (-1,)) def _convolve( self, char_idxs: torch.Tensor, ): char_embs = self.char_embeddings(char_idxs) char_embs = char_embs.transpose(1, 2) # BTC -> BCT conv_result = [] for conv in self.convolutions: x = conv(char_embs) x, _ = torch.max(x, -1) x = F.relu(x) conv_result.append(x) x = torch.cat(conv_result, dim=-1) if self.highway is not None: x = self.highway(x) x = self.projection(x) return x class Highway(torch.nn.Module): """ A `Highway layer <https://arxiv.org/abs/1505.00387>`_. Adopted from the AllenNLP implementation. """ def __init__( self, input_dim: int, num_layers: int = 1 ): super(Highway, self).__init__() self.input_dim = input_dim self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]) self.activation = nn.ReLU() self.reset_parameters() def reset_parameters(self): for layer in self.layers: # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do that by # setting the bias on `B(x)` to be positive, because that means `g` will be biased to # be high, so we will carry the input forward. The bias on `B(x)` is the second half # of the bias vector in each Linear layer. nn.init.constant_(layer.bias[self.input_dim:], 1) nn.init.constant_(layer.bias[:self.input_dim], 0) nn.init.xavier_normal_(layer.weight) def forward( self, x: torch.Tensor ): for layer in self.layers: projection = layer(x) proj_x, gate = projection.chunk(2, dim=-1) proj_x = self.activation(proj_x) gate = torch.sigmoid(gate) x = gate * x + (gate.new_tensor([1]) - gate) * proj_x return x
6,846
32.4
106
py
RegularizedBN
RegularizedBN-main/fairseq/modules/unfold.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn.functional as F def unfold1d(x, kernel_size, padding_l, pad_value=0): '''unfold T x B x C to T x B x C x K''' if kernel_size > 1: T, B, C = x.size() x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value) x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C)) else: x = x.unsqueeze(3) return x
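A small sketch (assumed sizes) of the sliding-window view: each time step of a `T x B x C` input is expanded into its `kernel_size` zero-padded neighbours.

import torch

x = torch.randn(10, 2, 8)                   # T x B x C
windows = unfold1d(x, kernel_size=3, padding_l=1, pad_value=0)
print(windows.shape)                        # torch.Size([10, 2, 8, 3]): one length-3 window per time step
# windows[t, b, c] holds the zero-padded neighbours x[t-1], x[t], x[t+1] for that channel
print(torch.equal(windows[5, 0, :, 1], x[5, 0]))   # True: the centre of each window is the original frame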
570
30.722222
91
py
RegularizedBN
RegularizedBN-main/fairseq/modules/fp32_group_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Layer norm done in fp32 (for fp16 training) """ import torch.nn as nn import torch.nn.functional as F class Fp32GroupNorm(nn.GroupNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.group_norm( input.float(), self.num_groups, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps, ) return output.type_as(input)
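A minimal sketch (assumed sizes): the statistics are always computed in fp32 and the result is cast back to the input dtype, which matters mainly for fp16 training on GPU.

import torch

gn = Fp32GroupNorm(num_groups=4, num_channels=32)
x = torch.randn(8, 32, 100)                 # N x C x L, as for a 1D convolutional feature map
y = gn(x)
print(y.shape, y.dtype)                     # torch.Size([8, 32, 100]) torch.float32
# with half-precision inputs the statistics are still taken in fp32 and the output is cast back by group_norm on the float copy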
727
27
69
py
RegularizedBN
RegularizedBN-main/fairseq/modules/adaptive_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import operator import functools import torch import torch.nn.functional as F from fairseq.modules.quant_noise import quant_noise from fairseq.modules.fairseq_dropout import FairseqDropout from torch import nn class TiedLinear(nn.Module): def __init__(self, weight, transpose): super().__init__() self.weight = weight self.transpose = transpose def forward(self, input): return F.linear(input, self.weight.t() if self.transpose else self.weight) class TiedHeadModule(nn.Module): def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size): super().__init__() tied_emb, _ = weights self.num_words, emb_dim = tied_emb.size() self.word_proj = quant_noise(TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size) if input_dim != emb_dim: self.word_proj = nn.Sequential( quant_noise(nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size), self.word_proj, ) self.class_proj = quant_noise(nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size) self.out_dim = self.num_words + num_classes self.register_buffer('_float_tensor', torch.FloatTensor(1)) def forward(self, input): inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) out = self._float_tensor.new(inp_sz, self.out_dim) out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1)) out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1)) return out class AdaptiveSoftmax(nn.Module): """ This is an implementation of the efficient softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309). 
""" def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert vocab_size == cutoff[ -1], 'cannot specify cutoff larger than vocab size' output_dim = cutoff[0] + len(cutoff) - 1 self.vocab_size = vocab_size self.cutoff = cutoff self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__) self.input_dim = input_dim self.factor = factor self.q_noise = q_noise self.qn_block_size = qn_block_size self.lsm = nn.LogSoftmax(dim=1) if adaptive_inputs is not None: self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size) else: self.head = quant_noise(nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size) self._make_tail(adaptive_inputs, tie_proj) def init_weights(m): if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer('version', torch.LongTensor([1])) def _make_tail(self, adaptive_inputs=None, tie_proj=False): self.tail = nn.ModuleList() for i in range(len(self.cutoff) - 1): dim = int(self.input_dim // self.factor ** (i + 1)) tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \ if adaptive_inputs is not None else (None, None) if tied_proj is not None: if tie_proj: proj = quant_noise(TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size) else: proj = quant_noise(nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size) else: proj = quant_noise(nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size) if tied_emb is None: out_proj = nn.Linear(dim, self.cutoff[i + 1] - self.cutoff[i], bias=False) else: out_proj = TiedLinear(tied_emb, transpose=False) m = nn.Sequential( proj, nn.Dropout(self.dropout_module.p), quant_noise(out_proj, self.q_noise, self.qn_block_size), ) self.tail.append(m) def upgrade_state_dict_named(self, state_dict, name): version_name = name + '.version' if version_name not in state_dict: raise Exception('This version of the model is no longer supported') def adapt_target(self, target): """ In order to be efficient, the AdaptiveSoftMax does not compute the scores for all the word of the vocabulary for all the examples. It is thus necessary to call the method adapt_target of the AdaptiveSoftMax layer inside each forward pass. 
""" target = target.view(-1) new_target = [target.clone()] target_idxs = [] for i in range(len(self.cutoff) - 1): mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) new_target[0][mask] = self.cutoff[0] + i if mask.any(): target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1)) new_target.append(target[mask].add(-self.cutoff[i])) else: target_idxs.append(None) new_target.append(None) return new_target, target_idxs def forward(self, input, target): """ Args: input: (b x t x d) target: (b x t) Returns: 2 lists: output for each cutoff section and new targets by cut off """ input = input.contiguous().view(-1, input.size(-1)) input = self.dropout_module(input) new_target, target_idxs = self.adapt_target(target) output = [self.head(input)] for i in range(len(target_idxs)): if target_idxs[i] is not None: output.append(self.tail[i](input.index_select(0, target_idxs[i]))) else: output.append(None) return output, new_target def get_log_prob(self, input, target): """ Computes the log probabilities for all the words of the vocabulary, given a 2D tensor of hidden vectors. """ bsz, length, dim = input.size() input = input.contiguous().view(-1, dim) if target is not None: _, target_idxs = self.adapt_target(target) else: target_idxs = None head_y = self.head(input) log_probs = head_y.new_zeros(input.size(0), self.vocab_size) head_sz = self.cutoff[0] + len(self.tail) log_probs[:, :head_sz] = self.lsm(head_y) tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone() for i in range(len(self.tail)): start = self.cutoff[i] end = self.cutoff[i + 1] if target_idxs is None: tail_out = log_probs[:, start:end] tail_out.copy_(self.tail[i](input)) log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None]) elif target_idxs[i] is not None: idxs = target_idxs[i] tail_out = log_probs[idxs, start:end] tail_out.copy_(self.tail[i](input[idxs])) log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None]) log_probs = log_probs.view(bsz, length, -1) return log_probs
7,945
35.95814
137
py
RegularizedBN
RegularizedBN-main/fairseq/modules/fc_select.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
from .fc.wn import CWN
from .fc.conv import Conv1d
from .fc.dropout_fc import DropoutFC
from .fc.oni_fc import ONI_Linear

def parse_fc(fc_type):
    args = fc_type.split("_")
    return args

def FcSelect(input_dim, output_dim, fc_type, layer_id=-1):
    args = parse_fc(fc_type)
    fc_type = args[0]
    #print(args);exit()
    if fc_type == "nnlinear":
        return nn.Linear(input_dim, output_dim)
    elif fc_type == "linear":
        if len(args)==1:
            return Linear(input_dim, output_dim)
        if len(args)==2:
            return Linear(input_dim, output_dim, scale=float(args[1]))
        if len(args)==3:
            return Linear(input_dim, output_dim, scale=float(args[1]), zero_bias=int(args[2]))
        else:
            print("error len linear!")
    elif fc_type == "dpfc":
        if args[-1]=='select':
            if args[-2].find(str(layer_id))>=0:
                return DropoutFC(input_dim, output_dim, dropout=float(args[1]),scale=float(args[2]))
            else:
                return DropoutFC(input_dim, output_dim, dropout=0.0,scale=float(args[2]))
        if len(args)==2:  # e.g. "dpfc_0.1": only the dropout rate is given
            return DropoutFC(input_dim, output_dim, dropout=float(args[1]))
        if len(args)==3:  # e.g. "dpfc_0.1_1.0": dropout rate and scale
            return DropoutFC(input_dim, output_dim, dropout=float(args[1]), scale=float(args[2]))
        else:
            print("error len dpfc!")
    elif fc_type == "onifc":
        if args[-1]=='select':
            if args[-2].find(str(layer_id))>=0:
                return ONI_Linear(input_dim, output_dim)
            else:
                return Linear(input_dim, output_dim, scale=float(args[1]))
        if len(args)==2:  # e.g. "onifc_1.0": a plain Linear with the given scale
            return Linear(input_dim, output_dim, scale=float(args[1]))
        else:
            print("error len onifc!")
    elif fc_type == "conv1d":
        return Conv1d(input_dim,output_dim,kernel_size=int(args[1]))
    elif fc_type == "wn":
        return CWN(input_dim, output_dim, iscenter=int(args[1]), Nscale=float(args[2]), adjustScale=int(args[3]))
    else:
        print("error FcSelect!")
        exit()
    #elif norm_type == 'power':
    #    return MaskPowerNorm(embed_dim, group_num=head_num, warmup_iters=warmup_updates)

def Linear(in_features, out_features, scale=1.0, zero_bias=True):
    m = nn.Linear(in_features, out_features, bias=True)
    nn.init.xavier_uniform_(m.weight,gain=scale)
    if zero_bias:
        nn.init.constant_(m.bias, 0.0)
    return m
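A small sketch (the type strings below are assumptions consistent with the parsing above) showing how an `fc_type` string selects and configures the projection.

import torch

fc = FcSelect(input_dim=512, output_dim=2048, fc_type="linear_1.0")   # xavier init with gain 1.0 and zero bias
x = torch.randn(10, 4, 512)
print(fc(x).shape)                          # torch.Size([10, 4, 2048])

plain = FcSelect(512, 2048, "nnlinear")     # falls back to a default nn.Linear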
2,654
38.044118
113
py
RegularizedBN
RegularizedBN-main/fairseq/modules/dropout_select.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
from .noise_dropout import NoiseDropout


def parse_dropout(dropout_type):
    args = dropout_type.split("_")
    return args

def DropoutSelect(dropout_type):
    args = parse_dropout(dropout_type)
    dropout_type = args[0]
    if dropout_type == "hard":
        return nn.Dropout(p=float(args[1]))      # split() yields strings, so cast before passing to nn.Dropout
    elif dropout_type == "noise":
        return NoiseDropout(alpha=float(args[1]))
    else:
        print("error DropoutSelect!")
        exit()
642
25.791667
65
py
RegularizedBN
RegularizedBN-main/fairseq/modules/conv_tbc.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch.nn.modules.utils import _single class ConvTBC(torch.nn.Module): """1D convolution over an input of shape (time x batch x channel) The implementation uses gemm to perform the convolution. This implementation is faster than cuDNN for small kernel sizes. """ def __init__(self, in_channels, out_channels, kernel_size, padding=0): super(ConvTBC, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _single(kernel_size) self.padding = _single(padding) self.weight = torch.nn.Parameter(torch.Tensor( self.kernel_size[0], in_channels, out_channels)) self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) def forward(self, input): return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0]) def __repr__(self): s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}' ', padding={padding}') if self.bias is None: s += ', bias=False' s += ')' return s.format(name=self.__class__.__name__, **self.__dict__)
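A minimal sketch (assumed sizes) of the time x batch x channel layout; note the module does not initialise its own parameters, so the sketch initialises them explicitly.

import torch
import torch.nn as nn

conv = ConvTBC(in_channels=64, out_channels=128, kernel_size=3, padding=1)
nn.init.xavier_uniform_(conv.weight)        # ConvTBC leaves its parameters uninitialised
nn.init.zeros_(conv.bias)

x = torch.randn(20, 8, 64)                  # T x B x C, unlike nn.Conv1d's B x C x T
y = conv(x)
print(y.shape)                              # torch.Size([20, 8, 128]); padding=1 keeps the time dimension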
1,356
35.675676
90
py
RegularizedBN
RegularizedBN-main/fairseq/modules/transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch import torch.nn as nn from torch.serialization import save from fairseq import utils from fairseq.modules import LayerNorm from fairseq.modules.norm.mask_identity import MaskIdentityNorm as InFcNorm from fairseq.modules.norm.mask_identity import MaskIdentityNorm as NoNorm #from fairseq.modules import CWN from fairseq.modules import NormSelect from fairseq.modules import FcSelect from fairseq.modules import ActivationSelect from fairseq.modules import MultiheadAttention, MultiheadAttentionSimple from fairseq.modules.quant_noise import quant_noise from fairseq.modules.fairseq_dropout import FairseqDropout from torch import Tensor import numpy as np from scipy import io import time from .statistics_utils import insert_probe from .statistics_utils import record_forward_weight_norm from .statistics_utils import save_attn_weights from .statistics_utils import save_residual_proportion from .statistics_init import init_config cond_name = ['c_max', 'c_20', 'c_50', 'c_80', 'r_total', 'r', 'intra_average_sim', 'inter_average_sim', 'total_average_sim'] class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    __count = 0
    def update_cnt(self):
        TransformerEncoderLayer.__count += 1
    def forward_hook(self,savefile):
        def forward_hook_template(m,i,o):
            input = i[0]
            self.input_norm[savefile].append(torch.norm(input,dim=(0,2)))
        return forward_hook_template

    def __init__(self, args):
        super().__init__()
        self.id = self.__count #assign a unique id to each transformer encoder layer
        self.update_cnt()

        self.orth_penalty = args.orth_penalty

        self.embed_dim = args.encoder_embed_dim
        self.quant_noise = getattr(args, "quant_noise_pq", 0)
        self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
        self.self_attn = self.build_self_attention(self.embed_dim, args)
        #self.self_attn_layer_norm = NormSelect(args.encoder_att_norm,self.embed_dim, args.wseq, prefix = self.prefix)

        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        #self.dropout_module = FairseqDropout(args.encoder_dropout, module_name=self.__class__.__name__)
        #self.after_norm_dropout_module = nn.Dropout(p=args.encoder_after_norm_dropout)
        #self.activation_fn = utils.get_activation_fn(
        #    activation=getattr(args, "activation_fn", "relu")
        #)
        self.activation_fn = ActivationSelect(args.encoder_activation)
        activation_dropout_p = getattr(args, "activation_dropout", 0)
        if activation_dropout_p == 0:
            # for backwards compatibility with models that use args.relu_dropout
            activation_dropout_p = getattr(args, "relu_dropout", 0)
        self.activation_dropout_module = FairseqDropout(
            float(activation_dropout_p), module_name=self.__class__.__name__
        )
        self.normalize_before = args.encoder_normalize_before
        #self.fc1 = nn.Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        #self.fc2 = nn.Linear(args.encoder_ffn_embed_dim,self.embed_dim)
        self.fc1 = FcSelect(self.embed_dim, args.encoder_ffn_embed_dim, args.encoder_fc1, self.id)
        self.fc2 = FcSelect(args.encoder_ffn_embed_dim, self.embed_dim, args.encoder_fc2, self.id)
        init_config(self, args)

        self.self_attn_layer_norm = NormSelect(args.encoder_att_norm, self.embed_dim, self.id, self.prefix)
        self.in_ffn_norm = NormSelect(args.encoder_in_ffn_norm, args.encoder_ffn_embed_dim, self.id, self.prefix)
        #self.before_relu_nonorm = NormSelect("batch_nonorm", args.encoder_ffn_embed_dim, self.id, self.prefix)
        #self.after_relu_nonorm = NormSelect("batch_nonorm", args.encoder_ffn_embed_dim, self.id, self.prefix)
        self.final_layer_norm = NormSelect(args.encoder_ffn_norm, self.embed_dim, self.id, self.prefix)
        #m.register_parameter('weight_g',nn.parameter.Parameter(torch.ones(3,1)))  -- the scale can be changed here
        if self.id == -1:
            self.fc1.weight_v.register_hook(self.hook_v)
            self.fc1.weight_g.register_hook(self.hook_v)

    def orth_loss(self, w):
        d1,d2 = w.shape
        I = torch.eye(min(d1,d2)).cuda()
        if d1>d2:
            cov = torch.matmul(w.transpose(0,1),w)
        else:
            cov = torch.matmul(w,w.transpose(0,1))
        cur_loss = ((cov-2*I)**2).sum()
        #print("orth loss:",cur_loss)
        return cur_loss

    def loss(self):
        assert self.training, "wrongly adding orthogonal penalty!"
loss = 0 if self.orth_penalty>0: loss += self.orth_penalty*self.orth_loss(self.fc1.weight) loss += self.orth_penalty*self.orth_loss(self.fc2.weight) return loss def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size) def build_self_attention(self, embed_dim, args): if args.encoder_attention=='origin': return MultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, normalize_q=args.normalize_q, normalize_k=args.normalize_k, normalize_v=args.normalize_v, g0=args.g0, fix_g0=args.fix_g0, ) elif args.encoder_attention=='position': return MultiheadAttentionSimple( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, normalize_q=args.normalize_q, normalize_k=args.normalize_k, normalize_v=args.normalize_v, g0=args.g0, fix_g0=args.fix_g0, ) else: print("error encoder attention!");exit() def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, src_tokens=None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, seq_len)` where padding elements are indicated by ``1``. attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`, where `tgt_len` is the length of output and `src_len` is the length of input, though here both are equal to `seq_len`. `attn_mask[tgt_i, src_j] = 1` means that when calculating the embedding for `tgt_i`, we exclude (mask out) `src_j`. This is useful for strided self-attention. 
        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        # anything in original attn_mask = 1, becomes -1e8
        # anything in original attn_mask = 0, becomes 0
        # Note that we cannot use -inf here, because at some edge cases,
        # the attention weight (before softmax) for some padded element in query
        # will become -inf, which results in NaN in model parameters
        #if(encoder_padding_mask.sum()!=0 and 0): #sometimes the first few entries are True and the rest are False
        #    print("has mask!",self.id)
        #    print(encoder_padding_mask.sum())
        #    print(encoder_padding_mask)
        #    #exit()
        #if(attn_mask is not None and 0): #always None here
        #    print("has att mask!",self.id)
        #    print(attn_mask);exit()
        if self.training:
            self.step += 1
        self.src_tokens = src_tokens
        self.mask = 1 - encoder_padding_mask.unsqueeze(dim=-1).type_as(x)
        if attn_mask is not None:
            attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)

        #record the weight matrix in FFN module
        record_forward_weight_norm(self)

        residual = x
        if self.normalize_before:
            position = 'att_norm_input'
            insert_probe(self, x, position)
            x = self.self_attn_layer_norm(x,encoder_padding_mask)
            #x = self.after_norm_dropout_module(x)
        #Attention Layer
        position = 'att_input'
        insert_probe(self, x, position)

        x, attn_weights = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
            attn_mask=attn_mask,
        )
        save_attn_weights(self, attn_weights)

        position = 'att_output'
        insert_probe(self, x, position)

        x = self.dropout_module(x) #apply the sub-layer dropout first, then add the residual
        save_residual_proportion(self, x, residual, module='att')
        x = residual + x
        if not self.normalize_before:
            position = 'att_norm_input'
            insert_probe(self, x, position)
            x = self.self_attn_layer_norm(x, encoder_padding_mask)
            #x = self.after_norm_dropout_module(x)
        #FFN
        residual = x
        if self.normalize_before:
            position = 'ffn_norm_input'
            insert_probe(self, x, position)
            x = self.final_layer_norm(x,encoder_padding_mask)
            #x = self.after_norm_dropout_module(x)
        position = 'ffn_input'
        insert_probe(self, x, position)

        x = self.fc1(x)

        position = 'before_relu'
        insert_probe(self, x, position)

        x = self.in_ffn_norm(x, encoder_padding_mask)
        #x = self.before_relu_nonorm(x, encoder_padding_mask)

        x = self.activation_fn(x)
        #x = self.after_relu_nonorm(x, encoder_padding_mask)

        position = 'after_relu'
        insert_probe(self, x, position)

        x = self.activation_dropout_module(x)
        x = self.fc2(x)

        position = 'ffn_output'
        insert_probe(self, x, position)

        x = self.dropout_module(x)
        save_residual_proportion(self, x, residual, module='ffn')
        x = residual + x
        if not self.normalize_before:
            position = 'ffn_norm_input'
            insert_probe(self, x, position)
            x = self.final_layer_norm(x, encoder_padding_mask)
            #x = self.after_norm_dropout_module(x)
        return x


class TransformerDecoderLayer(nn.Module):
    """Decoder layer block.

    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
""" __count = 0 def update_cnt(self): TransformerDecoderLayer.__count += 1 def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.id = self.__count #对每个transformer_encoder设定唯一标识 self.update_cnt() self.embed_dim = args.decoder_embed_dim self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) #self.dropout_module = FairseqDropout(args.decoder_dropout, module_name=self.__class__.__name__) self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.cross_self_attention = getattr(args, "cross_self_attention", False) self.self_attn = self.build_self_attention( self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, "char_inputs", False) #self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) #self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.self_attn_layer_norm = NormSelect(args.decoder_att_norm ,self.embed_dim) #self.self_attn_layer_norm = NoNorm(self.embed_dim) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, args) #self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) #self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) #self.encoder_attn_layer_norm = NoNorm(self.embed_dim) self.encoder_attn_layer_norm = NormSelect(args.decoder_cross_att_norm ,self.embed_dim) self.fc1 = FcSelect(self.embed_dim, args.decoder_ffn_embed_dim, args.decoder_fc1) self.in_ffn_norm = NormSelect(args.decoder_in_ffn_norm, args.decoder_ffn_embed_dim) self.fc2 = FcSelect(args.decoder_ffn_embed_dim, self.embed_dim, args.decoder_fc2) #self.fc1 = self.build_fc1( # self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size #) #self.fc2 = self.build_fc2( # args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size #) #self.final_layer_norm = LayerNorm(self.embed_dim, export=export) #self.final_layer_norm = LayerNorm(self.embed_dim) #self.final_layer_norm = NoNorm(self.embed_dim) self.final_layer_norm = NormSelect(args.decoder_ffn_norm ,self.embed_dim) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False): return MultiheadAttention( embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not getattr(args, "cross_self_attention", False), 
q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def build_encoder_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.decoder_attention_heads, kdim=getattr(args, "encoder_embed_dim", None), vdim=getattr(args, "encoder_embed_dim", None), dropout=args.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ #if self.id==0: # print("decoding!") if need_head_weights: need_attn = True residual = x if self.normalize_before: #print(self_attn_padding_mask) #assert self_attn_padding_mask is not None, "wrong attn padding mask!" #为什么训练时会出现none? x = self.self_attn_layer_norm(x, self_attn_padding_mask) if prev_self_attn_state is not None: #false prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): #false if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x, self_attn_padding_mask) if self.encoder_attn is not None: #true residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x, self_attn_padding_mask) if prev_attn_state is not None: #false prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = 
prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.encoder_attn_layer_norm(x, self_attn_padding_mask) residual = x if self.normalize_before: x = self.final_layer_norm(x, self_attn_padding_mask) x = self.fc1(x) x = self.in_ffn_norm(x,self_attn_padding_mask) #add decoder padding mask? x = self.activation_fn(x) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x, self_attn_padding_mask) if self.onnx_trace and incremental_state is not None: #false saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None def make_generation_fast_(self, need_attn: bool = False, **kwargs): self.need_attn = need_attn def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
24,120
40.162116
125
py
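The TransformerDecoderLayer above relies on a private class-level counter (__count plus update_cnt) so that every instance receives a creation-ordered id, which lets other code in the repo tell the layers apart. A minimal sketch of just that pattern, using a hypothetical DummyLayer in place of the full layer (the real one would also need the repo-specific NormSelect/FcSelect options on args):

import torch.nn as nn

class DummyLayer(nn.Module):
    # class-level counter shared by all instances, mirroring TransformerDecoderLayer.__count
    __count = 0

    def update_cnt(self):
        DummyLayer.__count += 1

    def __init__(self):
        super().__init__()
        self.id = self.__count   # creation-ordered, unique per instance
        self.update_cnt()

layers = [DummyLayer() for _ in range(3)]
print([layer.id for layer in layers])   # [0, 1, 2]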
RegularizedBN
RegularizedBN-main/fairseq/modules/beamable_mm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn class BeamableMM(nn.Module): """This module provides an optimized MM for beam decoding with attention. It leverage the fact that the source-side of the input is replicated beam times and the target-side of the input is of width one. This layer speeds up inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. """ def __init__(self, beam_size=None): super(BeamableMM, self).__init__() self.beam_size = beam_size def forward(self, input1, input2): if ( not self.training and # test mode self.beam_size is not None and # beam size is set input1.dim() == 3 and # only support batched input input1.size(1) == 1 # single time step update ): bsz, beam = input1.size(0), self.beam_size # bsz x 1 x nhu --> bsz/beam x beam x nhu input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu input2 = input2.unfold(0, beam, beam)[:, :, :, 0] # use non batched operation if bsz = beam if input1.size(0) == 1: output = torch.mm(input1[0, :, :], input2[0, :, :]) else: output = input1.bmm(input2) return output.view(bsz, 1, -1) else: return input1.bmm(input2) def set_beam_size(self, beam_size): self.beam_size = beam_size
1,779
36.083333
80
py
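A small self-contained check of BeamableMM above, assuming the file is importable as fairseq.modules.beamable_mm; the beam, batch and feature sizes are arbitrary illustration values. The second operand is passed in bmm layout, i.e. (bsz, nhu, src_len), and must be replicated beam times along the batch dimension for the shortcut to be exact; the optimized path only runs in eval mode.

import torch
from fairseq.modules.beamable_mm import BeamableMM

beam, sentences, nhu, src_len = 5, 2, 8, 7
bsz = sentences * beam

bmm = BeamableMM(beam_size=beam).eval()       # the optimized path is inference-only

query = torch.randn(bsz, 1, nhu)              # one decoding step per hypothesis
keys = torch.randn(sentences, nhu, src_len)   # one source matrix per sentence
keys = keys.repeat_interleave(beam, dim=0)    # replicated beam times, as in beam search

out = bmm(query, keys)                        # internally de-duplicates the replicated keys
ref = torch.bmm(query, keys)                  # plain batched matmul for comparison
print(out.shape, torch.allclose(out, ref, atol=1e-5))   # torch.Size([10, 1, 7]) True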
RegularizedBN
RegularizedBN-main/fairseq/modules/multihead_attention_ori.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Parameter from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size) self.v_proj = quant_noise(nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size) self.q_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size) self.out_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.tpu = False def prepare_for_onnx_export_(self): self.onnx_trace = True def prepare_for_tpu_(self, **kwargs): self.tpu = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.) 
if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if ( not self.onnx_trace and not self.tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if 
k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None src_len = k.size(1) # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not self.tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf") ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf')) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None 
attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) elif key_padding_mask is not None: filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
19,130
39.023013
103
py
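A minimal self-attention call against the module above, assuming it can be imported from the path shown (fairseq.modules.multihead_attention_ori); the sizes and padding pattern are illustrative. Inputs follow the fairseq (time, batch, channel) layout, and key_padding_mask is (batch, src_len) with True marking padded positions.

import torch
from fairseq.modules.multihead_attention_ori import MultiheadAttention

T, B, C = 5, 2, 16
attn = MultiheadAttention(embed_dim=C, num_heads=4, self_attention=True)

x = torch.randn(T, B, C)
pad_mask = torch.zeros(B, T, dtype=torch.bool)
pad_mask[1, -2:] = True      # pretend the last two positions of sample 1 are padding

out, weights = attn(query=x, key=x, value=x, key_padding_mask=pad_mask)
print(out.shape)       # torch.Size([5, 2, 16])   (T x B x C)
print(weights.shape)   # torch.Size([2, 5, 5])    (batch, tgt_len, src_len), averaged over heads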
RegularizedBN
RegularizedBN-main/fairseq/modules/layer_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F try: from apex.normalization import FusedLayerNorm as _FusedLayerNorm has_fused_layernorm = True class FusedLayerNorm(_FusedLayerNorm): @torch.jit.unused def forward(self, x): if not x.is_cuda: return super().forward(x) else: with torch.cuda.device(x.device): return super().forward(x) except ImportError: has_fused_layernorm = False def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): if torch.jit.is_scripting(): export = True if not export and torch.cuda.is_available() and has_fused_layernorm: return FusedLayerNorm(normalized_shape, eps, elementwise_affine) return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) class Fp32LayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.layer_norm( input.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps, ) return output.type_as(input)
1,499
29
81
py
RegularizedBN
RegularizedBN-main/fairseq/modules/kmeans_vector_quantizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from fairseq.modules import Fp32GroupNorm class KmeansVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25 ): '''Vector quantization using straight pass-through estimator (i.e. kmeans) Args: dim: input dimension (channels) num_vars: number of quantized vectors per group groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT gamma: commitment loss coefficient ''' super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.vq_dim = vq_dim self.time_first = time_first assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" self.var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.embedding = nn.Parameter( 0.01 * torch.randn(num_vars, num_groups, self.var_dim) ) self.projection = nn.Sequential( nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False), Fp32GroupNorm(groups, dim), ) self.gamma = gamma self.mse_mean = nn.MSELoss(reduction="mean") def _pass_grad(self, x, y): """ Manually set gradient for backward pass. for y = f(x), ensure that during the backward pass, dL/dy = dL/dx regardless of f(x). Returns: y, with the gradient forced to be dL/dy = dL/dx. """ return y.detach() + (x - x.detach()) @property def expand_embedding(self): if self.combine_groups: return self.embedding.expand(self.num_vars, self.groups, self.var_dim) return self.embedding def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars} if self.time_first: x = x.transpose(1, 2) bsz, fsz, tsz = x.shape ze = self.projection(x) ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2) d = ( (ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1)) .view(self.num_vars, bsz, tsz, self.groups, -1) .norm(dim=-1, p=2) ) idx = d.argmin(dim=0) zq = ( torch.stack( [ self.expand_embedding[idx[..., group], group] for group in range(self.groups) ], dim=-2, ) .view(bsz, tsz, self.groups * self.var_dim) .permute(0, 2, 1) ) assert ze.shape == zq.shape, (ze.shape, zq.shape) x = self._pass_grad(ze, zq) hard_x = ( idx.new_zeros(bsz*tsz*self.groups, self.num_vars) .scatter_(-1, idx.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() if produce_targets: result["targets"] = idx if self.time_first: x = x.transpose(1, 2) # BCT -> BTC result["x"] = x ze = ze.float() zq = zq.float() latent_loss = self.mse_mean(zq, ze.detach()) commitment_loss = self.mse_mean(ze, zq.detach()) result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss return result
4,248
31.937984
89
py
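An illustrative forward pass through the quantizer above, assuming the class is importable; the sizes are made up and only need to satisfy vq_dim % groups == 0. With produce_targets=True the result dict carries the selected codebook indices next to the straight-through quantized features and the k-means/commitment loss.

import torch
from fairseq.modules.kmeans_vector_quantizer import KmeansVectorQuantizer

vq = KmeansVectorQuantizer(
    dim=16, num_vars=32, groups=2, combine_groups=False,
    vq_dim=16, time_first=True, gamma=0.25,
)

x = torch.randn(2, 10, 16)            # (batch, time, channels) because time_first=True
out = vq(x, produce_targets=True)

print(out["x"].shape)          # torch.Size([2, 10, 16]) quantized features, gradients pass straight through
print(out["targets"].shape)    # torch.Size([2, 10, 2])  one codebook index per group
print(out["kmeans_loss"])      # latent term + gamma * commitment term
print(out["code_perplexity"])  # rough measure of how many codes are actually used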
RegularizedBN
RegularizedBN-main/fairseq/modules/layer_drop.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ LayerDrop as described in https://arxiv.org/abs/1909.11556. """ import torch import torch.nn as nn class LayerDropModuleList(nn.ModuleList): """ A LayerDrop implementation based on :class:`torch.nn.ModuleList`. We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During evaluation we always iterate over all layers. Usage:: layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3]) for layer in layers: # this might iterate over layers 1 and 3 x = layer(x) for layer in layers: # this might iterate over all layers x = layer(x) for layer in layers: # this might not iterate over any layers x = layer(x) Args: p (float): probability of dropping out each layer modules (iterable, optional): an iterable of modules to add """ def __init__(self, p, modules=None): super().__init__(modules) self.p = p def __iter__(self): dropout_probs = torch.empty(len(self)).uniform_() for i, m in enumerate(super().__iter__()): if not self.training or (dropout_probs[i] > self.p): yield m
1,409
30.333333
71
py
RegularizedBN
RegularizedBN-main/fairseq/modules/dynamic_crf_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This file is to re-implemented the low-rank and beam approximation of CRF layer Proposed by: Sun, Zhiqing, et al. Fast Structured Decoding for Sequence Models https://arxiv.org/abs/1910.11555 The CRF implementation is mainly borrowed from https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py """ import numpy as np import torch import torch.nn as nn def logsumexp(x, dim=1): return torch.logsumexp(x.float(), dim=dim).type_as(x) class DynamicCRF(nn.Module): """Dynamic CRF layer is used to approximate the traditional Conditional Random Fields (CRF) $P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$ where in this function, we assume the emition scores (s) are given, and the transition score is a |V| x |V| matrix $M$ in the following two aspects: (1) it used a low-rank approximation for the transition matrix: $M = E_1 E_2^T$ (2) it used a beam to estimate the normalizing factor Z(x) """ def __init__(self, num_embedding, low_rank=32, beam_size=64): super().__init__() self.E1 = nn.Embedding(num_embedding, low_rank) self.E2 = nn.Embedding(num_embedding, low_rank) self.vocb = num_embedding self.rank = low_rank self.beam = beam_size def extra_repr(self): return "vocab_size={}, low_rank={}, beam_size={}".format( self.vocb, self.rank, self.beam) def forward(self, emissions, targets, masks, beam=None): """ Compute the conditional log-likelihood of a sequence of target tokens given emission scores Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first targets (`~torch.LongTensor`): Sequence of target token indices ``(batch_size, seq_len) masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.Tensor`: approximated log-likelihood """ numerator = self._compute_score(emissions, targets, masks) denominator = self._compute_normalizer(emissions, targets, masks, beam) return numerator - denominator def forward_decoder(self, emissions, masks=None, beam=None): """ Find the most likely output sequence using Viterbi algorithm. Args: emissions (`~torch.Tensor`): Emission score are usually the unnormalized decoder output ``(batch_size, seq_len, vocab_size)``. We assume batch-first masks (`~torch.ByteTensor`): Mask tensor with the same size as targets Returns: `~torch.LongTensor`: decoded sequence from the CRF model """ return self._viterbi_decode(emissions, masks, beam) def _compute_score(self, emissions, targets, masks=None): batch_size, seq_len = targets.size() emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0] # B x T transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2) scores = emission_scores scores[:, 1:] += transition_scores if masks is not None: scores = scores * masks.type_as(scores) return scores.sum(-1) def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None): # HACK: we include "target" which is a hueristic for training # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?) 
beam = beam if beam is not None else self.beam batch_size, seq_len = emissions.size()[:2] if targets is not None: _emissions = emissions.scatter(2, targets[:, :, None], np.float('inf')) beam_targets = _emissions.topk(beam, 2)[1] beam_emission_scores = emissions.gather(2, beam_targets) else: beam_emission_scores, beam_targets = emissions.topk(beam, 2) beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D beam_transition_matrix = torch.bmm( beam_transition_score1.view(-1, beam, self.rank), beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2)) beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam) # compute the normalizer in the log-space score = beam_emission_scores[:, 0] # B x K for i in range(1, seq_len): next_score = score[:, :, None] + beam_transition_matrix[:, i-1] next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i] if masks is not None: score = torch.where(masks[:, i:i+1], next_score, score) else: score = next_score # Sum (log-sum-exp) over all possible tags return logsumexp(score, dim=1) def _viterbi_decode(self, emissions, masks=None, beam=None): # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?) beam = beam if beam is not None else self.beam batch_size, seq_len = emissions.size()[:2] beam_emission_scores, beam_targets = emissions.topk(beam, 2) beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D beam_transition_matrix = torch.bmm( beam_transition_score1.view(-1, beam, self.rank), beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2)) beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam) traj_tokens, traj_scores = [], [] finalized_tokens, finalized_scores = [], [] # compute the normalizer in the log-space score = beam_emission_scores[:, 0] # B x K dummy = torch.arange(beam, device=score.device).expand(*score.size()).contiguous() for i in range(1, seq_len): traj_scores.append(score) _score = score[:, :, None] + beam_transition_matrix[:, i-1] _score, _index = _score.max(dim=1) _score = _score + beam_emission_scores[:, i] if masks is not None: score = torch.where(masks[:, i: i+1], _score, score) index = torch.where(masks[:, i: i+1], _index, dummy) else: score, index = _score, _index traj_tokens.append(index) # now running the back-tracing and find the best best_score, best_index = score.max(dim=1) finalized_tokens.append(best_index[:, None]) finalized_scores.append(best_score[:, None]) for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)): previous_index = finalized_tokens[-1] finalized_tokens.append(idx.gather(1, previous_index)) finalized_scores.append(scs.gather(1, previous_index)) finalized_tokens.reverse() finalized_tokens = torch.cat(finalized_tokens, 1) finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0] finalized_scores.reverse() finalized_scores = torch.cat(finalized_scores, 1) finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1] return finalized_scores, finalized_tokens
7,676
40.497297
99
py
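A quick decoding run through the CRF layer above, assuming it is importable; vocabulary size, lengths and beam are arbitrary. For training, crf(emissions, targets, masks) returns an approximate per-sentence log-likelihood to negate and average; note that this path calls np.float('inf'), which requires NumPy older than 1.24 (where np.float was removed) unless that call is changed to float('inf').

import torch
from fairseq.modules.dynamic_crf_layer import DynamicCRF

crf = DynamicCRF(num_embedding=100, low_rank=8, beam_size=16)

B, T, V = 2, 6, 100
emissions = torch.randn(B, T, V)              # unnormalized decoder scores, batch-first
masks = torch.ones(B, T, dtype=torch.bool)    # every position is a real token here

scores, tokens = crf.forward_decoder(emissions, masks)   # beam-approximated Viterbi pass
print(tokens.shape)    # torch.Size([2, 6])  decoded label sequence
print(scores.shape)    # torch.Size([2, 6])  per-position score increments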
RegularizedBN
RegularizedBN-main/fairseq/modules/scalar_bias.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import torch class ScalarBias(torch.autograd.Function): """ Adds a vector of scalars, used in self-attention mechanism to allow the model to optionally attend to this vector instead of the past """ @staticmethod def forward(ctx, input, dim, bias_init): size = list(input.size()) size[dim] += 1 output = input.new(*size).fill_(bias_init) output.narrow(dim, 1, size[dim] - 1).copy_(input) ctx.dim = dim return output @staticmethod def backward(ctx, grad): return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None def scalar_bias(input, dim, bias_init=0): return ScalarBias.apply(input, dim, bias_init)
888
26.78125
74
py
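A quick illustration of scalar_bias above: it prepends one constant slot along the chosen dimension (here the key/time axis) so attention can optionally attend to it, and the backward pass simply drops that slot's gradient. Shapes are arbitrary.

import torch
from fairseq.modules.scalar_bias import scalar_bias

x = torch.randn(2, 3, 4, requires_grad=True)
y = scalar_bias(x, dim=1, bias_init=0)     # one extra row of zeros at index 0 of dim 1

print(y.shape)                             # torch.Size([2, 4, 4])
print(float(y[:, 0, :].abs().sum()))       # 0.0 -- the inserted bias slot

y.sum().backward()
print(x.grad.shape)                        # torch.Size([2, 3, 4]) -- gradient only for the original entries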
RegularizedBN
RegularizedBN-main/fairseq/modules/transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch import torch.nn as nn from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, MultiheadAttention, PositionalEmbedding, TransformerSentenceEncoderLayer, ) from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=0.02) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): module.q_proj.weight.data.normal_(mean=0.0, std=0.02) module.k_proj.weight.data.normal_(mean=0.0, std=0.02) module.v_proj.weight.data.normal_(mean=0.0, std=0.02) class TransformerSentenceEncoder(nn.Module): """ Implementation for a Bi-directional Transformer based Sentence Encoder used in BERT/XLM style pre-trained models. This first computes the token embedding using the token embedding matrix, position embeddings (if specified) and segment embeddings (if specified). After applying the specified number of TransformerEncoderLayers, it outputs all the internal states of the encoder as well as the final representation associated with the first token (usually CLS token). Input: - tokens: B x T matrix representing sentences - segment_labels: B x T matrix representing segment label for tokens Output: - a tuple of the following: - a list of internal model states used to compute the predictions where each tensor has shape T x B x C - sentence representation associated with first input token in format B x C. 
""" def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop: float = 0.0, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, traceable: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, ) -> None: super().__init__() self.padding_idx = padding_idx self.vocab_size = vocab_size self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__) self.layerdrop = layerdrop self.max_seq_len = max_seq_len self.embedding_dim = embedding_dim self.num_segments = num_segments self.use_position_embeddings = use_position_embeddings self.apply_bert_init = apply_bert_init self.learned_pos_embedding = learned_pos_embedding self.traceable = traceable self.tpu = False # whether we're on TPU self.embed_tokens = self.build_embedding( self.vocab_size, self.embedding_dim, self.padding_idx ) self.embed_scale = embed_scale if q_noise > 0: self.quant_noise = apply_quant_noise_( nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), q_noise, qn_block_size, ) else: self.quant_noise = None self.segment_embeddings = ( nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None) if self.num_segments > 0 else None ) self.embed_positions = ( PositionalEmbedding( self.max_seq_len, self.embedding_dim, padding_idx=(self.padding_idx if offset_positions_by_padding else None), learned=self.learned_pos_embedding, ) if self.use_position_embeddings else None ) if self.layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend([ self.build_transformer_sentence_encoder_layer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout_module.p, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) for _ in range(num_encoder_layers) ]) if encoder_normalize_before: self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export) else: self.emb_layer_norm = None # Apply initialization of model params after building the model if self.apply_bert_init: self.apply(init_bert_params) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False if freeze_embeddings: freeze_module_params(self.embed_tokens) freeze_module_params(self.segment_embeddings) freeze_module_params(self.embed_positions) freeze_module_params(self.emb_layer_norm) for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer]) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_transformer_sentence_encoder_layer( self, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export, q_noise, qn_block_size, ): return TransformerSentenceEncoderLayer( embedding_dim=embedding_dim, ffn_embedding_dim=ffn_embedding_dim, 
num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) def prepare_for_tpu_(self, **kwargs): self.tpu = True def forward( self, tokens: torch.Tensor, segment_labels: torch.Tensor = None, last_state_only: bool = False, positions: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # compute padding mask. This is needed for multi-head attention padding_mask = tokens.eq(self.padding_idx) if not self.traceable and not self.tpu and not padding_mask.any(): padding_mask = None x = self.embed_tokens(tokens) if self.embed_scale is not None: x *= self.embed_scale if self.embed_positions is not None: x += self.embed_positions(tokens, positions=positions) if self.segment_embeddings is not None and segment_labels is not None: x += self.segment_embeddings(segment_labels) if self.quant_noise is not None: x = self.quant_noise(x) if self.emb_layer_norm is not None: x = self.emb_layer_norm(x) x = self.dropout_module(x) # account for padding while computing the representation if padding_mask is not None: x *= 1 - padding_mask.unsqueeze(-1).type_as(x) # B x T x C -> T x B x C x = x.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(x) for layer in self.layers: x, _ = layer(x, self_attn_padding_mask=padding_mask) if not last_state_only: inner_states.append(x) sentence_rep = x[0, :, :] if last_state_only: inner_states = [x] if self.traceable: return torch.stack(inner_states), sentence_rep else: return inner_states, sentence_rep
9,720
33.842294
90
py
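A compact forward pass through the BERT-style encoder above. This sketch assumes the class is importable from the repo and that the fairseq layers it builds internally keep their upstream signatures; vocabulary, sizes and token ids are made up. The encoder returns per-layer states in (time, batch, channel) layout plus the first-token representation.

import torch
from fairseq.modules.transformer_sentence_encoder import TransformerSentenceEncoder

encoder = TransformerSentenceEncoder(
    padding_idx=0, vocab_size=100,
    num_encoder_layers=2, embedding_dim=64,
    ffn_embedding_dim=128, num_attention_heads=4,
)

tokens = torch.randint(1, 100, (2, 7))   # (batch, time); id 0 is reserved for padding
tokens[1, -2:] = 0                       # pad the tail of the second sentence

inner_states, sentence_rep = encoder(tokens)
print(len(inner_states))        # num_encoder_layers + 1 (the embedding output is included)
print(inner_states[-1].shape)   # torch.Size([7, 2, 64])  (T x B x C)
print(sentence_rep.shape)       # torch.Size([2, 64])     representation of the first token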
RegularizedBN
RegularizedBN-main/fairseq/modules/grad_multiply.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class GradMultiply(torch.autograd.Function): @staticmethod def forward(ctx, x, scale): ctx.scale = scale res = x.new(x) return res @staticmethod def backward(ctx, grad): return grad * ctx.scale, None
442
22.315789
65
py
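GradMultiply above is an identity in the forward pass and scales gradients on the way back, which fairseq uses to damp the gradient flowing into a sub-network. A two-line check with an arbitrary scale:

import torch
from fairseq.modules.grad_multiply import GradMultiply

x = torch.randn(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)      # forward: y == x
y.sum().backward()

print(torch.allclose(y, x))         # True
print(x.grad)                       # tensor([0.5000, 0.5000, 0.5000])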
RegularizedBN
RegularizedBN-main/fairseq/modules/sparse_transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from fairseq.modules import TransformerSentenceEncoder from fairseq.modules.sparse_transformer_sentence_encoder_layer import SparseTransformerSentenceEncoderLayer class SparseTransformerSentenceEncoder(TransformerSentenceEncoder): """ Sparse implementation of the TransformerSentenceEncoder - see SparseMultiheadAttention """ def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, is_bidirectional: bool = True, stride: int = 32, expressivity: int = 8, ) -> None: super().__init__( padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export ) self.layers = nn.ModuleList( [ SparseTransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity, ) for _ in range(num_encoder_layers) ] ) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer])
2,965
36.075
107
py
RegularizedBN
RegularizedBN-main/fairseq/modules/activation_select.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn def parse_activation(activation_type): args = activation_type.split("_") return args def ActivationSelect(activation_type): args = parse_activation(activation_type) activation_type = args[0] if activation_type == "relu": return nn.ReLU() elif activation_type == "leakyrelu": return nn.LeakyReLU(negative_slope=float(args[1])) #default 0.01 elif activation_type == "prelu": return nn.PReLU(init=float(args[1])) #default 0.25 elif activation_type =='celu': return nn.CELU(alpha=float(args[1])) #default 1 elif activation_type =='gelu': return nn.GELU() #no parameter elif activation_type =='elu': return nn.ELU(alpha=float(args[1])) #default 1 elif activation_type =='identity': return nn.Identity() elif activation_type =='sigmoid': return nn.Sigmoid() elif activation_type =='silu': # torch: only available since 1.10 return nn.SiLU() elif activation_type =='tanh': return nn.Tanh() else: print("error ActivationSelect!") exit()
1,317
28.288889
65
py
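ActivationSelect above decodes an underscore-separated string into an activation module, so an activation and its hyper-parameter can travel through a single command-line flag. A few illustrative calls (the strings are examples of the encoding, not values mandated by the repo):

import torch
from fairseq.modules.activation_select import ActivationSelect

relu = ActivationSelect("relu")             # no extra argument needed
leaky = ActivationSelect("leakyrelu_0.1")   # negative_slope parsed from the suffix
celu = ActivationSelect("celu_1.0")         # alpha parsed from the suffix

x = torch.randn(4)
print(relu(x), leaky(x), celu(x), sep="\n")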
RegularizedBN
RegularizedBN-main/fairseq/modules/sinusoidal_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Optional import torch import torch.onnx.operators from fairseq import utils from torch import Tensor, nn class SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1024): super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.weights = SinusoidalPositionalEmbedding.get_embedding( init_size, embedding_dim, padding_idx ) self.onnx_trace = False self.register_buffer("_float_tensor", torch.FloatTensor(1)) self.max_positions = int(1e5) def prepare_for_onnx_export_(self): self.onnx_trace = True @staticmethod def get_embedding( num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None ): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 1 ) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( num_embeddings, -1 ) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, positions: Optional[Any] = None, ): """Input is expected to be of size [bsz x seqlen].""" bspair = torch.onnx.operators.shape_as_tensor(input) bsz, seq_len = bspair[0], bspair[1] max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = SinusoidalPositionalEmbedding.get_embedding( max_pos, self.embedding_dim, self.padding_idx ) self.weights = self.weights.to(self._float_tensor) if incremental_state is not None: # positions is the same for every token when decoding a single step pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len if self.onnx_trace: return ( self.weights.index_select(index=self.padding_idx + pos, dim=0) .unsqueeze(1) .repeat(bsz, 1, 1) ) return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) positions = utils.make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape ) return embeddings return ( self.weights.index_select(0, positions.view(-1)) .view(bsz, seq_len, -1) .detach() )
3,880
35.613208
87
py
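The module above holds no learned parameters: it looks up fixed sinusoidal vectors by position and maps padding tokens to the zeroed padding_idx row. A small illustration with an arbitrary padding index and sizes:

import torch
from fairseq.modules.sinusoidal_positional_embedding import SinusoidalPositionalEmbedding

pad = 1
pos_emb = SinusoidalPositionalEmbedding(embedding_dim=8, padding_idx=pad, init_size=32)

tokens = torch.tensor([[5, 6, 7, pad],      # last position of the first sentence is padding
                       [5, 6, 7, 8]])
out = pos_emb(tokens)

print(out.shape)                            # torch.Size([2, 4, 8])
print(float(out[0, -1].abs().sum()))        # 0.0 -- padded positions get the zero vector
print(torch.equal(out[0, 0], out[1, 0]))    # True -- the embedding depends on position only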
RegularizedBN
RegularizedBN-main/fairseq/modules/lightweight_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.modules.unfold import unfold1d from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout def LightweightConv(input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, bias=False): if torch.cuda.is_available(): try: from fairseq.modules.lightconv_layer import LightconvLayer return LightconvLayer(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) except ImportError as e: print(e) return LightweightConv1dTBC(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) class LightweightConv1d(nn.Module): '''Lightweight Convolution assuming the input is BxCxT This is just an example that explains LightConv clearer than the TBC version. We don't use this module in the model. Args: input_size: # of channels of the input and output kernel_size: convolution channels padding: padding num_heads: number of heads used. The weight is of shape `(num_heads, 1, kernel_size)` weight_softmax: normalize the weight with softmax before the convolution Shape: Input: BxCxT, i.e. (batch_size, input_size, timesteps) Output: BxCxT, i.e. (batch_size, input_size, timesteps) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding=0, num_heads=1, weight_softmax=False, bias=False, weight_dropout=0.): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.num_heads = num_heads self.padding = padding self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) def forward(self, input): ''' input size: B x C x T output size: B x C x T ''' B, C, T = input.size() H = self.num_heads weight = self.weight if self.weight_softmax: weight = F.softmax(weight, dim=-1) weight = self.weight_dropout_module(weight) # Merge every C/H entries into the batch dimension (C = self.input_size) # B x C x T -> (B * C/H) x H x T # One can also expand the weight to C x 1 x K by a factor of C/H # and do not reshape the input instead, which is slow though input = input.view(-1, H, T) output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads) output = output.view(B, C, T) if self.bias is not None: output = output + self.bias.view(1, -1, 1) return output @with_incremental_state class LightweightConv1dTBC(nn.Module): '''Lightweight Convolution assuming the input is TxBxC Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. 
The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution bias: use bias Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, bias=False): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__) self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() self.onnx_trace = False def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.) def forward(self, x, incremental_state=None, unfold=False): '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead ''' unfold = unfold or (incremental_state is not None) if unfold: output = self._forward_unfolded(x, incremental_state) else: output = self._forward_expanded(x, incremental_state) if self.bias is not None: output = output + self.bias.view(1, 1, -1) return output def prepare_for_onnx_export_(self): self.onnx_trace = True def _forward_unfolded(self, x, incremental_state): '''The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:]) x_unfold = x_unfold.view(T*B*H, R, -1) else: # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0) x_unfold = x_unfold.view(T*B*H, R, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight) if incremental_state is not None: weight = weight[:, -x_unfold.size(2):] K = weight.size(1) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_state): '''Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
''' T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight) weight = weight.view(1, H, K).expand(T*B, H, K).contiguous() weight = weight.view(T, B*H, K).transpose(0, 1) x = x.view(T, B*H, R).transpose(0, 1) P = self.padding_l if K > T and P == K-1: weight = weight.narrow(2, K-T, T) K, P = T, T-1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False) weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) weight_expanded = self.weight_dropout_module(weight_expanded) output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, 'input_buffer') def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer) def extra_repr(self): s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}'.format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.bias is not None ) if self.weight_dropout_module.p > 0.: s += ', weight_dropout={}'.format(self.weight_dropout_module.p) return s
10,496
39.844358
104
py
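A forward pass through the pure-PyTorch fallback LightweightConv1dTBC above (the LightweightConv factory switches to the CUDA lightconv kernel when it is available); sizes and the padding choice are illustrative. With kernel_size=3 and padding_l=1 the sequence length is preserved, and the band-matrix path and the explicit unfolding path give the same result.

import torch
from fairseq.modules.lightweight_convolution import LightweightConv1dTBC

T, B, C = 10, 2, 16
conv = LightweightConv1dTBC(
    input_size=C, kernel_size=3, padding_l=1,
    num_heads=4, weight_softmax=True, bias=True,
)

x = torch.randn(T, B, C)         # fairseq layout: (time, batch, channel)
y = conv(x)                      # default: band-matrix ("expanded") implementation
y_unfold = conv(x, unfold=True)  # explicit unfolding implementation

print(y.shape)                                   # torch.Size([10, 2, 16])
print(torch.allclose(y, y_unfold, atol=1e-5))    # True -- both code paths agree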
RegularizedBN
RegularizedBN-main/fairseq/modules/dynamic_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from .unfold import unfold1d from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout def DynamicConv(input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False): if torch.cuda.is_available(): try: from fairseq.modules.dynamicconv_layer import DynamicconvLayer return DynamicconvLayer(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) except ImportError as e: print(e) return DynamicConv1dTBC(input_size, kernel_size=kernel_size, padding_l=padding_l, num_heads=num_heads, weight_dropout=weight_dropout, weight_softmax=weight_softmax, bias=bias) def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) return m @with_incremental_state class DynamicConv1dTBC(nn.Module): '''Dynamic lightweight convolution taking T x B x C inputs Args: input_size: # of channels of the input kernel_size: convolution channels padding_l: padding to the left when using "same" padding num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1) bias: use bias conv_bias: bias of the convolution query_size: specified when feeding a different input as the query in_proj: project the input and generate the filter together Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` ''' def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0., weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False): super().__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__) self.weight_softmax = weight_softmax self.renorm_padding = renorm_padding if in_proj: self.weight_linear = Linear(self.input_size, self.input_size + num_heads * kernel_size * 1) else: self.weight_linear = Linear(self.query_size, num_heads * kernel_size * 1, bias=bias) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() @property def in_proj(self): return self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size def reset_parameters(self): self.weight_linear.reset_parameters() if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.) 
    def forward(self, x, incremental_state=None, query=None, unfold=None):
        '''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C.

        args:
            x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
            incremental_state: A dict to keep the state
            unfold: unfold the input or not. If not, we use the matrix trick instead
            query: use the specified query to predict the conv filters
        '''
        unfold = x.size(0) > 512 if unfold is None else unfold  # use unfold mode as default for long sequences to save memory
        unfold = unfold or (incremental_state is not None)
        assert query is None or not self.in_proj

        if query is None:
            query = x
        if unfold:
            output = self._forward_unfolded(x, incremental_state, query)
        else:
            output = self._forward_expanded(x, incremental_state, query)

        if self.conv_bias is not None:
            output = output + self.conv_bias.view(1, 1, -1)
        return output

    def _forward_unfolded(self, x, incremental_state, query):
        '''The conventional implementation of convolutions.
        Unfolding the input by having a window shifting to the right.'''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        if self.in_proj:
            proj = self.weight_linear(x)
            x = proj.narrow(2, 0, self.input_size).contiguous()
            weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
        else:
            weight = self.weight_linear(query).view(T*B*H, -1)

        # renorm_padding is only implemented in _forward_expanded
        assert not self.renorm_padding or incremental_state is not None

        if incremental_state is not None:
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
            x_unfold = x_unfold.view(T*B*H, R, -1)
        else:
            padding_l = self.padding_l
            if K > T and padding_l == K-1:
                weight = weight.narrow(1, K-T, T)
                K, padding_l = T, T-1
            # unfold the input: T x B x C --> T' x B x C x K
            x_unfold = unfold1d(x, K, padding_l, 0)
            x_unfold = x_unfold.view(T*B*H, R, K)

        if self.weight_softmax and not self.renorm_padding:
            weight = F.softmax(weight, dim=1)
        weight = weight.narrow(1, 0, K)

        if incremental_state is not None:
            weight = weight[:, -x_unfold.size(2):]
            K = weight.size(1)

        if self.weight_softmax and self.renorm_padding:
            weight = F.softmax(weight, dim=1)

        weight = self.weight_dropout_module(weight, inplace=False)

        output = torch.bmm(x_unfold, weight.unsqueeze(2))  # T*B*H x R x 1
        output = output.view(T, B, C)
        return output

    def _forward_expanded(self, x, incremental_stat, query):
        '''Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
        '''
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size
        if self.in_proj:
            proj = self.weight_linear(x)
            x = proj.narrow(2, 0, self.input_size).contiguous()
            weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
        else:
            weight = self.weight_linear(query).view(T*B*H, -1)

        if not self.renorm_padding:
            if self.weight_softmax:
                weight = F.softmax(weight, dim=1)
            weight = self.weight_dropout_module(weight, inplace=False)
        weight = weight.narrow(1, 0, K).contiguous()
        weight = weight.view(T, B*H, K).transpose(0, 1)

        x = x.view(T, B*H, R).transpose(0, 1)
        if self.weight_softmax and self.renorm_padding:
            # turn the convolution filters into band matrices
            weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
            # normalize the weight over valid positions like self-attention
            weight_expanded = F.softmax(weight_expanded, dim=2)
            weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
        else:
            P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
            if K > T and P == K-1:
                weight = weight.narrow(2, K-T, T)
                K, P = T, T-1
            # turn the convolution filters into band matrices
            weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
            weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
            weight_expanded = weight_expanded.narrow(2, P, T)  # B*H x T x T
        output = torch.bmm(weight_expanded, x)
        output = output.transpose(0, 1).contiguous().view(T, B, C)
        return output

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)

    def extra_repr(self):
        s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}'.format(
            self.input_size, self.kernel_size, self.padding_l, self.num_heads,
            self.weight_softmax, self.conv_bias is not None, self.renorm_padding, self.in_proj,
        )
        if self.query_size != self.input_size:
            s += ', query_size={}'.format(self.query_size)
        if self.weight_dropout_module.p > 0.:
            s += ', weight_dropout={}'.format(self.weight_dropout_module.p)
        return s
11,057
43.95122
132
py
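A minimal usage sketch for the DynamicConv1dTBC module in the record above, assuming the repository's fairseq package is importable. The tensor sizes (T=10, B=2, C=16), the kernel size of 4 with padding_l=3, and num_heads=4 are illustrative choices, not values prescribed by the source; with no incremental state and a short sequence the call goes through the band-matrix (_forward_expanded) path.

import torch
from fairseq.modules.dynamic_convolution import DynamicConv1dTBC

T, B, C = 10, 2, 16  # timesteps, batch, channels; C must be divisible by num_heads
conv = DynamicConv1dTBC(
    input_size=C,
    kernel_size=4,
    padding_l=3,          # left "same" padding for a causal kernel of size 4
    num_heads=4,
    weight_softmax=True,
)
x = torch.randn(T, B, C)  # T x B x C layout
y = conv(x)               # output keeps the T x B x C shape
assert y.shape == (T, B, C)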
RegularizedBN
RegularizedBN-main/fairseq/modules/noise_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import List, Optional

import torch.nn as nn
import torch.nn.functional as F
import torch

logger = logging.getLogger(__name__)


class NoiseDropout(nn.Module):

    def __init__(self, alpha):
        super().__init__()
        self.alpha = alpha

    def forward(self, x, inplace: bool = False):
        if self.training:
            coef = (2*torch.rand_like(x)-1).to(x)
            coef *= self.alpha
            x = x*(1+coef)
            return x
        else:
            return x
700
22.366667
65
py
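A small sanity-check sketch for the NoiseDropout module above: during training every element is scaled by a factor drawn uniformly from [1 - alpha, 1 + alpha], and in eval mode the module is the identity. The import path and the alpha value are assumptions for illustration.

import torch
from fairseq.modules.noise_dropout import NoiseDropout

layer = NoiseDropout(alpha=0.1)
x = torch.ones(4, 3)

layer.train()
noisy = layer(x)                                # elementwise scaling by a factor in [0.9, 1.1]
assert ((noisy - x).abs() <= 0.1 + 1e-6).all()

layer.eval()
assert torch.equal(layer(x), x)                 # no perturbation at inference time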
RegularizedBN
RegularizedBN-main/fairseq/modules/norm/mask_powernorm3d.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import numpy as np
from scipy import io

__all__ = ['MaskPowerNorm3d']


class MaskPowerNorm3d(nn.Module):
    """
    An implementation of masked batch normalization, used for testing
    the numerical stability.
    """
    __count = 0

    def update_cnt(self):
        MaskPowerNorm3d.__count += 1

    def __init__(self, num_features, eps=1e-5, momentum=0.05,
                 affine=True, track_running_stats=True, sync_bn=True, process_group=None,
                 with_seq=True, prefix='None', weight_affine=0, penalty_var=0,
                 penalty_type='diff', learn_alpha=False, alpha=1):
        super().__init__()
        self.id = self.__count
        self.prefix = prefix
        #print(self.id)
        self.update_cnt()
        self.interval = 1100  # standard: 1100
        #self.save_interval = 20
        self.batchnum = 0
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum  # default: 0.05
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.maxlen = 600
        self.with_seq = with_seq
        self.learn_alpha = learn_alpha
        if self.learn_alpha:
            self.alpha = Parameter(torch.Tensor(1))
            nn.init.constant_(self.alpha, alpha)
        self.weight_affine = weight_affine
        self.penalty_var = penalty_var
        self.penalty_type = penalty_type
        assert self.penalty_type in ['diff', 'mag', 'reldiffnorm', 'reldiffcosine', 'symkl'], \
            "wrong penalty type for BN!"
        #self.running_mean_list = []
        #self.running_var_list = []
        #self.batch_mean_list = []
        #self.batch_var_list = []
        self.batch_var_norm = []
        self.running_var_norm = []
        self.diff_var_norm = []
        self.grad_proj_list = []
        self.file = 'statistics/{}/bn_{}'.format(self.prefix, self.id)
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.sync_bn = sync_bn
        # gpu_size is set through DistributedDataParallel initialization. This is to ensure that SyncBatchNorm is used
        # under supported condition (single GPU per process)
        self.process_group = process_group
        self.ddp_gpu_size = 4

        self.reset_parameters()

    def reset_running_stats(self):
        if self.track_running_stats:
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'track_running_stats={track_running_stats}'.format(**self.__dict__)

    def record_forward(self):
        diff_var_data = (self.running_var - self.batch_var).data
        self.diff_var_norm.append(diff_var_data.norm().cpu().numpy().item())
        self.batch_var_norm.append(self.batch_var.norm().cpu().numpy().item())
        self.running_var_norm.append(self.running_var.norm().cpu().numpy().item())

        if self.batchnum % self.interval == 0:
            savefile = "{}_forward_{}.mat".format(self.file, self.batchnum // self.interval)
            d = {}
            diff_var = np.array(self.diff_var_norm)
            batch_var = np.array(self.batch_var_norm)
            running_var = np.array(self.running_var_norm)
            d['diff_var'] = diff_var
            d['running_var'] = running_var
            d['batch_var'] = batch_var
            io.savemat(savefile, d)
            self.batch_var_norm = []
            self.running_var_norm = []
            self.diff_var_norm = []

    def backward_hook(self, grad):
        # grad: B x T x C
        grad_proj = ((grad * self.mask) * self.x).sum(dim=(0, 1)) / self.sum_size ** 0.5
        #self.grad_mean_list.append(grad_mean.data.cpu().reshape([1,-1]))
        #self.grad_proj_list.append(grad_proj.data.cpu().reshape([1,-1]))
        self.grad_proj_list.append(grad_proj.norm().cpu().numpy().item())
        #print(grad_mean.shape, grad_proj.shape); exit()
        if self.batchnum % self.interval == 0:
            savefile = "{}_backward_{}.mat".format(self.file, self.batchnum // self.interval)
            d = {}
            #grad_mean_arr = torch.cat(self.grad_mean_list, dim=0)
            #grad_proj_arr = torch.cat(self.grad_proj_list, dim=0)
            d['grad_proj'] = np.array(self.grad_proj_list)
            from scipy import io
            io.savemat(savefile, d)
            self.grad_proj_list = []

    def loss(self):
        loss = 0
        assert self.training == True, "wrongly adding BN inconsistent loss!"
        if self.penalty_var == 0:
            return loss
        if self.penalty_type == 'diff':
            loss = self.loss_diff(loss)
        if self.penalty_type == 'mag':
            loss = self.loss_magnitude(loss)
        if self.penalty_type == 'reldiffnorm':
            loss = self.loss_rel_diff_norm(loss)
        if self.penalty_type == 'reldiffcosine':
            loss = self.loss_rel_diff_cosine(loss)
        if self.penalty_type == 'symkl':
            loss = self.sym_kl_loss(loss)
        return loss

    def loss_diff(self, loss):
        if self.weight_affine:
            loss += self.penalty_var * ((torch.abs(self.running_var - self.batch_var)) * self.weight.detach()).sum()
            #print(loss)  # loss: starts in the low tens; after a little training mostly 1-2 or below 1, occasionally around 9
        else:
            loss += self.penalty_var * (torch.abs(self.running_var - self.batch_var)).sum()
        return loss

    def loss_rel_diff_norm(self, loss):
        loss += self.penalty_var * (torch.abs(self.running_var - self.batch_var) / (self.running_var.norm(p=1)).sum() + 1)
        return loss

    def loss_rel_diff_cosine(self, loss):
        loss -= self.penalty_var * (self.running_var * self.batch_var).sum() / self.batch_var.norm() / self.running_var.norm()
        #loss -= self.penalty_var*torch.sqrt((self.running_var*self.batch_var)).sum()/torch.sqrt(self.batch_var.sum()*self.running_var.sum())
        return loss

    def loss_magnitude(self, loss):
        loss += self.penalty_var * (torch.abs(self.batch_var)).sum()
        return loss

    def sym_kl_loss(self, loss):
        item1 = self.running_var / (self.batch_var + self.eps) + self.batch_var / (self.running_var + self.eps)
        loss += self.penalty_var * (item1.sum())
        return loss

    def forward(self, input, pad_mask=None, is_encoder=False, update_run=True):
        """
        input:  T x B x C -> B x C x T
             :  B x C x T -> T x B x C
        pad_mask: B x T (padding is True)
        """
        T, B, C = input.shape
        input = input.contiguous()
        #print(input.shape)  # 21, 192, 512
        input = input.permute(1, 0, 2)  # T,B,C --> B,T,C
        #print(pad_mask.shape, input.shape)  # torch.Size([192, 21]) torch.Size([192, 21, 512])
        #print(~pad_mask); exit()  # True: masked position, False: unmasked position
        if pad_mask is not None:
            mask = 1 - pad_mask.unsqueeze(dim=-1).type_as(input)
        else:
            mask = torch.ones(B, T, 1).cuda()
        #if not self.training:
        #    mask = torch.ones(B, T, 1).cuda()
        #else:
        #    print("error bn pad mask!")
        #    print(self.id)
        #    print(pad_mask)
        #    exit()
        input = input * mask

        # Compute the sum and square-sum.
        sum_size = mask.sum()
        #print(sum_size, input.size(0)*input.size(1), sum_size/input.size(0)/input.size(1)); exit()  # 4032
        input_ssum = (input ** 2).sum(dim=(0, 1), keepdim=True)

        # Compute the statistics
        if self.training:
            self.batchnum += 1
            self.mask = mask
            self.sum_size = sum_size  # effectively the batch size used for the statistics
            bias_var = input_ssum / sum_size  # used to update the running statistics
            self.batch_var = bias_var.squeeze()
            with torch.no_grad():
                stat_var = bias_var.squeeze()
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * stat_var.data
            self.record_forward()
        else:
            bias_var = self.running_var

        input = (input) / torch.sqrt(bias_var.clamp(self.eps))

        if self.training:
            #input.register_hook(self.backward_hook)
            pass
        if self.training:
            self.x = input.data * mask

        if self.affine:
            output = input * self.weight + self.bias
        else:
            output = input

        output = output.permute(1, 0, 2).contiguous()  # B,T,C --> T,B,C
        return output
9,403
38.512605
141
py
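A hedged, CPU-only sketch of one training step through the MaskPowerNorm3d module above. It divides by the masked second moment rather than a mean-centered variance, tracks a running second moment, and exposes loss() as an auxiliary penalty on the gap between batch and running statistics. The constructor arguments, toy shapes, and the pad mask are illustrative assumptions; passing an explicit pad_mask avoids the .cuda() call taken when no mask is given, and no .mat statistics are written here because saving only happens every interval (1100) training batches.

import torch
from fairseq.modules.norm.mask_powernorm3d import MaskPowerNorm3d

T, B, C = 5, 2, 8
norm = MaskPowerNorm3d(C, penalty_var=0.01, penalty_type='diff')

x = torch.randn(T, B, C)
pad_mask = torch.zeros(B, T, dtype=torch.bool)  # True marks padded positions
pad_mask[1, -2:] = True                         # pretend the second sequence is two steps shorter

norm.train()
y = norm(x, pad_mask=pad_mask)                  # normalized output, still T x B x C
assert y.shape == (T, B, C)

reg = norm.loss()                               # penalty on |running_var - batch_var|, scaled by penalty_var
print(float(reg))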