Dataset columns: text (string, 5 to 22M chars) | id (string, 12 to 177 chars) | metadata (dict) | __index_level_0__ (int64, 0 to 1.37k)
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
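#
# Downloads the IWSLT'15/'17 training, dev and test sets for the ar, it, nl, ko
# and vi <-> English directions, strips the TED markup, and writes raw
# train/valid/test files in ML50 naming (e.g. train.ar_AR-en_XX.ar_AR) under
# ${WORKDIR_ROOT}/ML50/raw.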
#echo 'Cloning Moses github repository (for tokenization scripts)...'
#git clone https://github.com/moses-smt/mosesdecoder.git
if [ -z "$WORKDIR_ROOT" ] ;
then
        echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
        exit 1
fi
data_root=${WORKDIR_ROOT}/iwsltv2
DESTDIR=${WORKDIR_ROOT}/ML50/raw
langs="ar_AR it_IT nl_XX ko_KR vi_VN"
echo "data_root: $data_root"
download_path=${data_root}/downloads
raw=${DESTDIR}
tmp=${data_root}/tmp
orig=${data_root}/orig
mkdir -p $download_path $orig $raw $tmp
#######################
download_iwslt(){
iwslt_key=$1
src=$2
tgt=$3
save_prefix=$4
pushd ${download_path}
    if [[ ! -f ${save_prefix}$src-$tgt.tgz ]]; then
        wget https://wit3.fbk.eu/archive/${iwslt_key}/texts/$src/$tgt/$src-$tgt.tgz -O ${save_prefix}$src-$tgt.tgz
        if [ $? -eq 0 ]; then
            # pop the directory stack before returning so later pushd/popd calls stay balanced
            popd
            return 0
        fi
    fi
    popd
}
extract_iwslt(){
src=$1
tgt=$2
prefix=$3
pushd $orig
tar zxvf ${download_path}/${prefix}$src-${tgt}.tgz
popd
}
generate_train(){
lsrc=$1
ltgt=$2
src=${lsrc:0:2}
tgt=${ltgt:0:2}
for ll in $lsrc $ltgt; do
l=${ll:0:2}
f="$orig/*/train.tags.$src-$tgt.$l"
f_raw=$raw/train.$lsrc-$ltgt.$ll
cat $f \
| grep -v '<url>' \
| grep -v '<talkid>' \
| grep -v '<keywords>' \
| grep -v '<speaker>' \
| grep -v '<reviewer' \
| grep -v '<translator' \
| grep -v '<doc' \
| grep -v '</doc>' \
| sed -e 's/<title>//g' \
| sed -e 's/<\/title>//g' \
| sed -e 's/<description>//g' \
| sed -e 's/<\/description>//g' \
| sed 's/^\s*//g' \
| sed 's/\s*$//g' \
> $f_raw
[ $? -eq 0 ] && echo "extracted $f to $f_raw"
done
return 0
}
convert_valid_test(){
src=$1
tgt=$2
for l in $src $tgt; do
echo "lang: ${l}"
for o in `ls $orig/*/IWSLT*.TED*.$src-$tgt.$l.xml`; do
fname=${o##*/}
f=$tmp/${fname%.*}
echo "$o => $f"
grep '<seg id' $o \
| sed -e 's/<seg id="[0-9]*">\s*//g' \
| sed -e 's/\s*<\/seg>\s*//g' \
| sed -e "s/\’/\'/g" \
> $f
echo ""
done
done
}
generate_subset(){
lsrc=$1
ltgt=$2
src=${lsrc:0:2}
tgt=${ltgt:0:2}
subset=$3
prefix=$4
for ll in $lsrc $ltgt; do
l=${ll:0:2}
f=$tmp/$prefix.${src}-${tgt}.$l
if [[ -f $f ]]; then
cp $f $raw/$subset.${lsrc}-$ltgt.${ll}
fi
done
}
#################
echo "downloading iwslt training and dev data"
# using multilingual for it, nl
download_iwslt "2017-01-trnmted" DeEnItNlRo DeEnItNlRo
download_iwslt "2017-01-trnted" ar en
download_iwslt "2017-01-trnted" en ar
download_iwslt "2017-01-trnted" ko en
download_iwslt "2017-01-trnted" en ko
download_iwslt "2015-01" vi en
download_iwslt "2015-01" en vi
echo "donwloading iwslt test data"
download_iwslt "2017-01-mted-test" it en "test."
download_iwslt "2017-01-mted-test" en it "test."
download_iwslt "2017-01-mted-test" nl en "test."
download_iwslt "2017-01-mted-test" en nl "test."
download_iwslt "2017-01-ted-test" ar en "test."
download_iwslt "2017-01-ted-test" en ar "test."
download_iwslt "2017-01-ted-test" ko en "test."
download_iwslt "2017-01-ted-test" en ko "test."
download_iwslt "2015-01-test" vi en "test."
download_iwslt "2015-01-test" en vi "test."
echo "extract training data tar balls"
extract_iwslt DeEnItNlRo DeEnItNlRo
extract_iwslt ar en
extract_iwslt en ar
extract_iwslt ko en
extract_iwslt en ko
extract_iwslt vi en
extract_iwslt en vi
echo "extracting iwslt test data"
for lang in $langs; do
l=${lang:0:2}
extract_iwslt $l en "test."
extract_iwslt en $l "test."
done
echo "convert dev and test data"
for lang in $langs; do
s_lang=${lang:0:2}
convert_valid_test $s_lang en
convert_valid_test en $s_lang
done
echo "creating training data into $raw"
for lang in $langs; do
generate_train $lang en_XX
generate_train en_XX $lang
done
echo "creating iwslt dev data into raw"
generate_subset en_XX vi_VN valid "IWSLT15.TED.tst2013"
generate_subset vi_VN en_XX valid "IWSLT15.TED.tst2013"
generate_subset en_XX ar_AR valid "IWSLT17.TED.tst2016"
generate_subset ar_AR en_XX valid "IWSLT17.TED.tst2016"
generate_subset en_XX ko_KR valid "IWSLT17.TED.tst2016"
generate_subset ko_KR en_XX valid "IWSLT17.TED.tst2016"
generate_subset en_XX it_IT valid "IWSLT17.TED.tst2010"
generate_subset it_IT en_XX valid "IWSLT17.TED.tst2010"
generate_subset en_XX nl_XX valid "IWSLT17.TED.tst2010"
generate_subset nl_XX en_XX valid "IWSLT17.TED.tst2010"
echo "creating iswslt test data into raw"
generate_subset en_XX vi_VN test "IWSLT15.TED.tst2015"
generate_subset vi_VN en_XX test "IWSLT15.TED.tst2015"
generate_subset en_XX ar_AR test "IWSLT17.TED.tst2017"
generate_subset ar_AR en_XX test "IWSLT17.TED.tst2017"
generate_subset en_XX ko_KR test "IWSLT17.TED.tst2017"
generate_subset ko_KR en_XX test "IWSLT17.TED.tst2017"
generate_subset en_XX it_IT test "IWSLT17.TED.tst2017.mltlng"
generate_subset it_IT en_XX test "IWSLT17.TED.tst2017.mltlng"
generate_subset en_XX nl_XX test "IWSLT17.TED.tst2017.mltlng"
generate_subset nl_XX en_XX test "IWSLT17.TED.tst2017.mltlng"
# normalize iwslt directions into x-en
pushd $raw
for lang in $langs; do
for split in test valid; do
x_en_f1=$split.$lang-en_XX.en_XX
x_en_f2=$split.$lang-en_XX.${lang}
en_x_f1=$split.en_XX-$lang.en_XX
en_x_f2=$split.en_XX-$lang.${lang}
if [ -f $en_x_f1 ] && [ ! -f $x_en_f1 ]; then
echo "cp $en_x_f1 $x_en_f1"
cp $en_x_f1 $x_en_f1
fi
        if [ -f $en_x_f2 ] && [ ! -f $x_en_f2 ]; then
echo "cp $en_x_f2 $x_en_f2"
cp $en_x_f2 $x_en_f2
fi
done
done
popd
--- end of COCO-LM/fairseq/examples/multilingual/data_scripts/download_iwslt_and_extract.sh ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Optional, List, Tuple
import torch
import torch.nn as nn
from fairseq import metrics, utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
)
from torch import Tensor
logger = logging.getLogger(__name__)
@register_model("transformer_pointer_generator")
class TransformerPointerGeneratorModel(TransformerModel):
"""
    Transformer model from `"Attention Is All You Need" (Vaswani et al., 2017)
    <https://arxiv.org/abs/1706.03762>`_, augmented with a pointer-generator
    network from `"Get To The Point: Summarization with Pointer-Generator
    Networks" (See et al., 2017) <https://arxiv.org/abs/1704.04368>`_.
Args:
encoder (TransformerPointerGeneratorEncoder): the encoder
decoder (TransformerPointerGeneratorDecoder): the decoder
The Transformer pointer-generator model provides the following named
architectures and command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_pointer_generator_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
TransformerModel.add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='N',
help='number of attention heads to be used for '
'pointing')
parser.add_argument('--alignment-layer', type=int, metavar='I',
help='layer number to be used for pointing (0 '
'corresponding to the bottommost layer)')
parser.add_argument('--source-position-markers', type=int, metavar='N',
help='dictionary includes N additional items that '
'represent an OOV token at a particular input '
'position')
parser.add_argument('--force-generation', type=float, metavar='P',
default=None,
help='set the vocabulary distribution weight to P, '
'instead of predicting it from the input (1.0 '
'corresponding to generation, 0.0 to pointing)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
if getattr(args, "source_position_markers", None) is None:
args.source_position_markers = args.max_source_positions
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if src_dict != tgt_dict:
raise ValueError("Pointer-generator requires a joined dictionary")
def build_embedding(dictionary, embed_dim, path=None):
# The dictionary may include additional items that can be used in
# place of the normal OOV token and that all map to the same
# embedding. Using a different token for each input position allows
# one to restore the word identities from the original source text.
num_embeddings = len(dictionary) - args.source_position_markers
padding_idx = dictionary.pad()
unk_idx = dictionary.unk()
logger.info(
"dictionary indices from {0} to {1} will be mapped to {2}".format(
num_embeddings, len(dictionary) - 1, unk_idx
)
)
emb = Embedding(num_embeddings, embed_dim, padding_idx, unk_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerPointerGeneratorEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerPointerGeneratorDecoder(args, tgt_dict, embed_tokens)
class TransformerPointerGeneratorEncoder(TransformerEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`. The pointer-generator variant adds
the source tokens to the encoder output as these are otherwise not passed
to the decoder.
"""
def forward(
self,
src_tokens,
src_lengths: Optional[Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[Tensor] = None
):
"""
Runs the `forward()` method of the parent Transformer class. Then adds
the source tokens into the encoder output tuple.
        While it might be more elegant if the model also passed the source
        tokens to the `forward()` method of the decoder, this would require
changes to `SequenceGenerator`.
Args:
src_tokens (torch.LongTensor): tokens in the source language of
shape `(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
- **src_tokens** (Tensor): input token ids of shape
`(batch, src_len)`
"""
encoder_out = self.forward_scriptable(src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": encoder_out["encoder_out"], # T x B x C
"encoder_padding_mask": encoder_out["encoder_padding_mask"], # B x T
"encoder_embedding": encoder_out["encoder_embedding"], # B x T x C
"encoder_states": encoder_out["encoder_states"], # List[T x B x C]
"src_tokens": [src_tokens], # B x T
"src_lengths": [],
}
class TransformerPointerGeneratorDecoder(TransformerDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`. The pointer-generator variant mixes
the output probabilities with an attention distribution in the output layer.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False)
# In the pointer-generator model these arguments define the decoder
# layer and the number of attention heads that will be averaged to
# create the alignment for pointing.
self.alignment_heads = args.alignment_heads
self.alignment_layer = args.alignment_layer
input_embed_dim = embed_tokens.embedding_dim
# Generation probabilities / interpolation coefficients are predicted
# from the current decoder input embedding and the decoder output, which
# is the size of output_embed_dim.
p_gen_input_size = input_embed_dim + self.output_embed_dim
self.project_p_gens = nn.Linear(p_gen_input_size, 1)
nn.init.zeros_(self.project_p_gens.bias)
# The dictionary may include a separate entry for an OOV token in each
# input position, so that their identity can be restored from the
# original source text.
self.num_types = len(dictionary)
self.num_oov_types = args.source_position_markers
self.num_embeddings = self.num_types - self.num_oov_types
self.force_p_gen = args.force_generation
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = 0,
alignment_heads: Optional[int] = 1,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False)
alignment_layer (int, optional): 0-based index of the layer to be
used for pointing (default: 0)
alignment_heads (int, optional): number of attention heads to be
used for pointing (default: 1)
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
# The normal Transformer model doesn't pass the alignment_layer and
# alignment_heads parameters correctly. We use our local variables.
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=self.alignment_layer,
alignment_heads=self.alignment_heads,
)
if not features_only:
# Embedding the tokens again for generation probability prediction,
# so that we don't have to reimplement the whole extract_features()
# method.
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
prev_output_embed = self.embed_tokens(prev_output_tokens)
prev_output_embed *= self.embed_scale
predictors = torch.cat((prev_output_embed, x), 2)
p_gens = self.project_p_gens(predictors)
p_gens = torch.sigmoid(p_gens)
# Torchscript complains if encoder_out or attn are None because
# `output_layer()` signature expects tensors instead
attn: Optional[Tensor] = extra["attn"][0]
assert encoder_out is not None
assert attn is not None
x = self.output_layer(x, attn, encoder_out["src_tokens"][0], p_gens)
return x, extra
def output_layer(
self,
features: Tensor,
attn: Tensor,
src_tokens: Tensor,
p_gens: Tensor
) -> Tensor:
"""
Project features to the vocabulary size and mix with the attention
distributions.
"""
if self.force_p_gen is not None:
p_gens = self.force_p_gen
# project back to size of vocabulary
if self.adaptive_softmax is None:
logits = self.output_projection(features)
else:
logits = features
batch_size = logits.shape[0]
output_length = logits.shape[1]
assert logits.shape[2] == self.num_embeddings
assert src_tokens.shape[0] == batch_size
src_length = src_tokens.shape[1]
# The final output distribution will be a mixture of the normal output
# distribution (softmax of logits) and attention weights.
gen_dists = self.get_normalized_probs_scriptable(
(logits, None), log_probs=False, sample=None
)
gen_dists = torch.mul(gen_dists, p_gens)
padding_size = (batch_size, output_length, self.num_oov_types)
padding = gen_dists.new_zeros(padding_size)
gen_dists = torch.cat((gen_dists, padding), 2)
assert gen_dists.shape[2] == self.num_types
# Scatter attention distributions to distributions over the extended
# vocabulary in a tensor of shape [batch_size, output_length,
# vocab_size]. Each attention weight will be written into a location
# that is for other dimensions the same as in the index tensor, but for
# the third dimension it's the value of the index tensor (the token ID).
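        # Illustrative example (hypothetical values): for one output step with
        # src_tokens = [[7, 7, 4]] and attention weights [[[0.2, 0.3, 0.5]]],
        # the scatter below accumulates 0.2 + 0.3 = 0.5 at vocab index 7 and
        # 0.5 at index 4 (ignoring the (1 - p_gen) scaling applied first).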
attn = torch.mul(attn, 1 - p_gens)
index = src_tokens[:, None, :]
index = index.expand(batch_size, output_length, src_length)
attn_dists_size = (batch_size, output_length, self.num_types)
attn_dists = attn.new_zeros(attn_dists_size)
attn_dists.scatter_add_(2, index, attn)
# Final distributions, [batch_size, output_length, num_types].
return gen_dists + attn_dists
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""
Get normalized probabilities (or log probs) from a net's output.
Pointer-generator network output is already normalized.
"""
probs = net_output[0]
# Make sure the probabilities are greater than zero when returning log
# probabilities.
return probs.clamp(1e-10, 1.0).log() if log_probs else probs
class Embedding(nn.Embedding):
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings. This subclass differs from the standard PyTorch Embedding class by
allowing additional vocabulary entries that will be mapped to the unknown token
embedding.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
padding_idx (int): Pads the output with the embedding vector at :attr:`padding_idx`
(initialized to zeros) whenever it encounters the index.
unk_idx (int): Maps all token indices that are greater than or equal to
num_embeddings to this index.
Attributes:
weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
initialized from :math:`\mathcal{N}(0, 1)`
Shape:
- Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract
- Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
.. note::
Keep in mind that only a limited number of optimizers support
sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
:class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
.. note::
With :attr:`padding_idx` set, the embedding vector at
:attr:`padding_idx` is initialized to all zeros. However, note that this
vector can be modified afterwards, e.g., using a customized
initialization method, and thus changing the vector used to pad the
output. The gradient for this vector from :class:`~torch.nn.Embedding`
is always zero.
"""
__constants__ = ["unk_idx"]
# Torchscript: Inheriting from Embedding class produces an error when exporting to Torchscript
    # -> RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
# It's happening because max_norm attribute from nn.Embedding is None by default and it cannot be
# cast to a C++ type
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int],
unk_idx: int,
max_norm: Optional[float] = float("inf"),
):
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx, max_norm=max_norm)
self.unk_idx = unk_idx
nn.init.normal_(self.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(self.weight[padding_idx], 0)
def forward(self, input):
input = torch.where(
input >= self.num_embeddings, torch.ones_like(input) * self.unk_idx, input
)
return nn.functional.embedding(
input, self.weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse
)
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator"
)
def transformer_pointer_generator(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", -1)
base_architecture(args)
if args.alignment_layer < 0:
args.alignment_layer = args.decoder_layers + args.alignment_layer
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_iwslt_de_en"
)
def transformer_pointer_generator_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
transformer_pointer_generator(args)
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de"
)
def transformer_pointer_generator_wmt_en_de(args):
transformer_pointer_generator(args)
# Transformer pointer-generator with the base Transformer parameters as used in
# the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"transformer_pointer_generator",
"transformer_pointer_generator_vaswani_wmt_en_de_big",
)
def transformer_pointer_generator_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
transformer_pointer_generator(args)
@register_model_architecture(
"transformer_pointer_generator",
"transformer_pointer_generator_vaswani_wmt_en_fr_big",
)
def transformer_pointer_generator_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big"
)
def transformer_pointer_generator_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big_t2t"
)
def transformer_pointer_generator_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
--- end of COCO-LM/fairseq/examples/pointer_generator/pointer_generator_src/transformer_pg.py ---
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
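# Example invocation (paths are placeholders):
#   python preprocess_RACE.py --input-dir RACE --output-dir race-data
# For each split this writes <split>.input0 (paragraphs), <split>.input1 ...
# <split>.input4 (question + option pairs) and <split>.label (answer index 0-3).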
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split("-")
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, "r") as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r"\s+", " ", context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r"\s+", " ", qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
    Helper script to extract paragraphs, questions and answers from RACE datasets.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help="input directory for downloaded RACE dataset",
)
parser.add_argument(
"--output-dir",
help="output directory for extracted data",
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [
os.path.join(args.output_dir, set_type + ".input" + str(i + 1))
for i in range(4)
]
qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, "w")
outf_label = open(outf_label_path, "w")
for example in examples:
outf_context.write(example.paragraph + "\n")
for i in range(4):
qa_files[i].write(example.qa_list[i] + "\n")
outf_label.write(str(example.label) + "\n")
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == "__main__":
main()
--- end of COCO-LM/fairseq/examples/roberta/preprocess_RACE.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
build_agent, register_agent, MONOTONIC_AGENT, _ = registry.setup_registry(
"--agent-type"
)
DEFAULT_EOS = "</s>"
GET = 0
SEND = 1
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("agents." + module)
--- end of COCO-LM/fairseq/examples/simultaneous_translation/eval/agents/__init__.py ---
from functools import partial
import torch
import math
import torch.nn.functional as F
from . import register_monotonic_attention
from .monotonic_multihead_attention import (
MonotonicMultiheadAttentionWaitK,
MonotonicMultiheadAttentionHardAligned,
MonotonicMultiheadAttentionInfiniteLookback,
)
def fixed_pooling_monotonic_attention(monotonic_attention):
def create_model(monotonic_attention, klass):
class FixedStrideMonotonicAttention(monotonic_attention):
def __init__(self, args):
super().__init__(args)
self.pre_decision_type = args.fixed_pre_decision_type
self.pre_decision_ratio = args.fixed_pre_decision_ratio
self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold
if self.pre_decision_ratio == 1:
return
if args.fixed_pre_decision_type == "average":
self.pooling_layer = torch.nn.AvgPool1d(
kernel_size=self.pre_decision_ratio,
stride=self.pre_decision_ratio,
ceil_mode=True,
)
elif args.fixed_pre_decision_type == "last":
def last(key):
if key.size(2) < self.pre_decision_ratio:
return key
else:
k = key[
:,
:,
self.pre_decision_ratio - 1 :: self.pre_decision_ratio,
].contiguous()
if key.size(-1) % self.pre_decision_ratio != 0:
k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous()
return k
self.pooling_layer = last
else:
raise NotImplementedError
@staticmethod
def add_args(parser):
super(
FixedStrideMonotonicAttention, FixedStrideMonotonicAttention
).add_args(parser)
parser.add_argument(
"--fixed-pre-decision-ratio",
type=int,
required=True,
help=(
"Ratio for the fixed pre-decision,"
"indicating how many encoder steps will start"
"simultaneous decision making process."
),
)
parser.add_argument(
"--fixed-pre-decision-type",
default="average",
choices=["average", "last"],
help="Pooling type",
)
parser.add_argument(
"--fixed-pre-decision-pad-threshold",
type=float,
default=0.3,
help="If a part of the sequence has pad"
",the threshold the pooled part is a pad.",
)
def insert_zeros(self, x):
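                # Upsamples along the source dimension by inserting zeros between
                # pooled probabilities. Illustrative example (hypothetical values):
                # with pre_decision_ratio = 3, a row [p1, p2] becomes
                # [0, 0, p1, 0, 0, p2], i.e. each pooled probability is placed on
                # the last encoder step of its pooling window.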
bsz_num_heads, tgt_len, src_len = x.size()
stride = self.pre_decision_ratio
weight = F.pad(x.new_ones(1, 1, 1), (stride - 1, 0))
x_upsample = F.conv_transpose1d(
x.view(-1, src_len).unsqueeze(1),
weight,
stride=stride,
padding=0,
)
return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1)
def p_choose(
self,
query,
key,
key_padding_mask=None,
incremental_state=None,
**extra_args
):
src_len = key.size(0)
tgt_len = query.size(0)
batch_size = query.size(1)
if self.pre_decision_ratio == 1:
return super().p_choose(
query,
key,
key_padding_mask=None,
incremental_state=None,
**extra_args
)
key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2)
if key_padding_mask is not None:
key_padding_mask_pool = (
self.pooling_layer(key_padding_mask.unsqueeze(0).float())
.squeeze(0)
.gt(self.pre_decision_pad_threshold)
)
# Make sure at least one element is not pad
key_padding_mask_pool[:, 0] = 0
else:
key_padding_mask_pool = None
if incremental_state is not None:
                    # Floor instead of ceil is used for inference,
                    # but make sure the length of key_pool is at least 1
if (
max(1, math.floor(key.size(0) / self.pre_decision_ratio))
) < key_pool.size(0):
key_pool = key_pool[:-1]
if key_padding_mask_pool is not None:
key_padding_mask_pool = key_padding_mask_pool[:-1]
p_choose_pooled = super().p_choose(
query,
key_pool,
key_padding_mask_pool,
incremental_state=incremental_state,
)
# Upsample, interpolate zeros
p_choose = self.insert_zeros(p_choose_pooled)
if p_choose.size(-1) < src_len:
# Append zeros if the upsampled p_choose is shorter than src_len
p_choose = torch.cat(
[
p_choose,
p_choose.new_zeros(
p_choose.size(0),
tgt_len,
src_len - p_choose.size(-1)
)
],
dim=2
)
else:
# can be larger than src_len because we used ceil before
p_choose = p_choose[:, :, :src_len]
p_choose[:, :, -1] = p_choose_pooled[:, :, -1]
assert list(p_choose.size()) == [
batch_size * self.num_heads,
tgt_len,
src_len,
]
return p_choose
FixedStrideMonotonicAttention.__name__ = klass.__name__
return FixedStrideMonotonicAttention
return partial(create_model, monotonic_attention)
@register_monotonic_attention("waitk_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionWaitK)
class MonotonicMultiheadAttentionWaitkFixedStride:
pass
@register_monotonic_attention("hard_aligned_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionHardAligned)
class MonotonicMultiheadAttentionHardFixedStride:
pass
@register_monotonic_attention("infinite_lookback_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicMultiheadAttentionInfiniteLookback)
class MonotonicMultiheadAttentionInfiniteLookbackFixedStride:
pass
--- end of COCO-LM/fairseq/examples/simultaneous_translation/modules/fixed_pre_decision.py ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with flashlight's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in flashlight, currently just "1", "2", ...
This prevents training with numeral tokens, so this might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
--- end of COCO-LM/fairseq/examples/speech_recognition/data/replabels.py ---
# Speech-to-Text (S2T) Modeling
[https://www.aclweb.org/anthology/2020.aacl-demo.6](https://www.aclweb.org/anthology/2020.aacl-demo.6.pdf)
Speech recognition (ASR) and speech-to-text translation (ST) with fairseq.
## Data Preparation
S2T modeling data consists of source speech features, target text and other optional information
(source text, speaker id, etc.). Fairseq S2T uses per-dataset-split TSV manifest files
to store this information. Each data field is represented by a column in the TSV file.
Unlike text token embeddings, speech features (e.g. log mel-scale filter banks) are usually fixed
during model training and can be pre-computed. The manifest file contains the path to
either the feature file in NumPy format or the WAV/FLAC audio file. For the latter,
features will be extracted on-the-fly by fairseq S2T. Optionally, feature/audio files can be packed
into uncompressed ZIP files (then accessed via byte offset and length) to improve I/O performance.
Fairseq S2T also employs a YAML file for data-related configurations: tokenizer type and dictionary path
for the target text, feature transforms such as CMVN (cepstral mean and variance normalization) and SpecAugment,
temperature-based resampling, etc.
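A minimal config sketch is shown below. The field names follow the options read by the
`S2TDataConfig` wrapper; the values, file names and tokenizer arguments are placeholders
rather than a recommended recipe.
```yaml
# config.yaml (illustrative values only)
vocab_filename: dict.txt            # fairseq dictionary under the data root
bpe_tokenizer:
  bpe: sentencepiece
  sentencepiece_model: spm.model    # assumed tokenizer-specific argument
input_feat_per_channel: 80          # filter bank dimension
input_channels: 1
use_audio_input: false              # set true to feed raw waveforms instead of features
audio_root: ""                      # prefix for relative audio paths in the manifest
```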
## Model Training
Fairseq S2T uses the unified `fairseq-train` interface for model training. It requires arguments `--task speech_to_text`,
`--arch <model architecture in fairseq.models.speech_to_text.*>` and `--config-yaml <config YAML filename>`.
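An illustrative training invocation is sketched below; the data directory, save directory,
architecture and hyper-parameters are placeholders, not a tuned recipe.
```bash
fairseq-train ${S2T_DATA_ROOT} \
  --task speech_to_text --config-yaml config.yaml \
  --train-subset train --valid-subset dev \
  --arch s2t_transformer_s \
  --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
  --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt --warmup-updates 10000 \
  --max-tokens 40000 --max-update 100000 \
  --save-dir ${SAVE_DIR}
```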
## Inference & Evaluation
Fairseq S2T uses the unified `fairseq-generate`/`fairseq-interactive` interface for inference and evaluation. It
requires arguments `--task speech_to_text` and `--config-yaml <config YAML filename>`. The interactive console takes
audio paths (one per line) as inputs.
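For example, decoding a test split could look like the following sketch (paths and options
are placeholders):
```bash
fairseq-generate ${S2T_DATA_ROOT} \
  --task speech_to_text --config-yaml config.yaml \
  --gen-subset test --path ${SAVE_DIR}/checkpoint_best.pt \
  --max-tokens 50000 --beam 5 --scoring wer
```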
## Examples
- [Speech Recognition (ASR) on LibriSpeech](docs/librispeech_example.md)
- [Speech-to-Text Translation (ST) on MuST-C](docs/mustc_example.md)
- [Speech-to-Text Translation (ST) on CoVoST 2](docs/covost_example.md)
- [Speech-to-Text Translation (ST) on Multilingual TEDx](docs/mtedx_example.md)
- [Simultaneous Speech-to-Text Translation (SimulST) on MuST-C](docs/simulst_mustc_example.md)
## Updates
- 02/04/2021: Added interactive decoding (`fairseq-interactive`) support. Examples:
[ASR (LibriSpeech)](docs/librispeech_example.md#interactive-decoding)
and [ST (CoVoST 2)](docs/covost_example.md#interactive-decoding).
- 01/08/2021: Several fixes for S2T Transformer model, inference-time de-tokenization, scorer configuration and data
preparation scripts. We also add pre-trained models to the examples and revise the instructions.
Breaking changes: the data preparation scripts now extract filterbank features without CMVN. CMVN is instead applied
on-the-fly (defined in the config YAML).
## What's Next
- We are migrating the old fairseq [ASR example](../speech_recognition) into this S2T framework and
merging the features from both sides.
- The following papers also base their experiments on fairseq S2T. We are adding more examples for replication.
- [Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation (Wang et al., 2020)](https://arxiv.org/abs/2006.05474)
- [Self-Supervised Representations Improve End-to-End Speech Translation (Wu et al., 2020)](https://arxiv.org/abs/2006.12124)
- [Self-Training for End-to-End Speech Translation (Pino et al., 2020)](https://arxiv.org/abs/2006.02490)
- [CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus (Wang et al., 2020)](https://arxiv.org/abs/2002.01320)
- [Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade (Pino et al., 2019)](https://arxiv.org/abs/1909.06515)
## Citation
Please cite as:
```
@inproceedings{wang2020fairseqs2t,
title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq},
author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino},
booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations},
year = {2020},
}
@inproceedings{ott2019fairseq,
title = {fairseq: A Fast, Extensible Toolkit for Sequence Modeling},
author = {Myle Ott and Sergey Edunov and Alexei Baevski and Angela Fan and Sam Gross and Nathan Ng and David Grangier and Michael Auli},
booktitle = {Proceedings of NAACL-HLT 2019: Demonstrations},
year = {2019},
}
```
--- end of COCO-LM/fairseq/examples/speech_to_text/README.md ---
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
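#
# Prepares IWSLT'17 de-en and fr-en data: downloads and extracts the archives,
# strips the TED/XML markup from the train and dev sets, learns a joint
# SentencePiece BPE model over all four sides, and encodes the train/valid
# splits with it.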
SRCS=(
"de"
"fr"
)
TGT=en
ROOT=$(dirname "$0")
SCRIPTS=$ROOT/../../scripts
SPM_TRAIN=$SCRIPTS/spm_train.py
SPM_ENCODE=$SCRIPTS/spm_encode.py
BPESIZE=16384
ORIG=$ROOT/iwslt17_orig
DATA=$ROOT/iwslt17.de_fr.en.bpe16k
mkdir -p "$ORIG" "$DATA"
TRAIN_MINLEN=1 # remove sentences with <1 BPE token
TRAIN_MAXLEN=250 # remove sentences with >250 BPE tokens
URLS=(
"https://wit3.fbk.eu/archive/2017-01-trnted/texts/de/en/de-en.tgz"
"https://wit3.fbk.eu/archive/2017-01-trnted/texts/fr/en/fr-en.tgz"
)
ARCHIVES=(
"de-en.tgz"
"fr-en.tgz"
)
VALID_SETS=(
"IWSLT17.TED.dev2010.de-en IWSLT17.TED.tst2010.de-en IWSLT17.TED.tst2011.de-en IWSLT17.TED.tst2012.de-en IWSLT17.TED.tst2013.de-en IWSLT17.TED.tst2014.de-en IWSLT17.TED.tst2015.de-en"
"IWSLT17.TED.dev2010.fr-en IWSLT17.TED.tst2010.fr-en IWSLT17.TED.tst2011.fr-en IWSLT17.TED.tst2012.fr-en IWSLT17.TED.tst2013.fr-en IWSLT17.TED.tst2014.fr-en IWSLT17.TED.tst2015.fr-en"
)
# download and extract data
for ((i=0;i<${#URLS[@]};++i)); do
ARCHIVE=$ORIG/${ARCHIVES[i]}
if [ -f "$ARCHIVE" ]; then
echo "$ARCHIVE already exists, skipping download"
else
URL=${URLS[i]}
wget -P "$ORIG" "$URL"
if [ -f "$ARCHIVE" ]; then
echo "$URL successfully downloaded."
else
echo "$URL not successfully downloaded."
exit 1
fi
fi
    FILE=${ARCHIVE%.tgz}  # expected extraction directory (archive path without .tgz)
if [ -e "$FILE" ]; then
echo "$FILE already exists, skipping extraction"
else
tar -C "$ORIG" -xzvf "$ARCHIVE"
fi
done
echo "pre-processing train data..."
for SRC in "${SRCS[@]}"; do
for LANG in "${SRC}" "${TGT}"; do
cat "$ORIG/${SRC}-${TGT}/train.tags.${SRC}-${TGT}.${LANG}" \
| grep -v '<url>' \
| grep -v '<talkid>' \
| grep -v '<keywords>' \
| grep -v '<speaker>' \
| grep -v '<reviewer' \
| grep -v '<translator' \
| grep -v '<doc' \
| grep -v '</doc>' \
| sed -e 's/<title>//g' \
| sed -e 's/<\/title>//g' \
| sed -e 's/<description>//g' \
| sed -e 's/<\/description>//g' \
| sed 's/^\s*//g' \
| sed 's/\s*$//g' \
> "$DATA/train.${SRC}-${TGT}.${LANG}"
done
done
echo "pre-processing valid data..."
for ((i=0;i<${#SRCS[@]};++i)); do
SRC=${SRCS[i]}
VALID_SET=(${VALID_SETS[i]})
for ((j=0;j<${#VALID_SET[@]};++j)); do
FILE=${VALID_SET[j]}
for LANG in "$SRC" "$TGT"; do
grep '<seg id' "$ORIG/${SRC}-${TGT}/${FILE}.${LANG}.xml" \
| sed -e 's/<seg id="[0-9]*">\s*//g' \
| sed -e 's/\s*<\/seg>\s*//g' \
| sed -e "s/\’/\'/g" \
> "$DATA/valid${j}.${SRC}-${TGT}.${LANG}"
done
done
done
# learn BPE with sentencepiece
TRAIN_FILES=$(for SRC in "${SRCS[@]}"; do echo $DATA/train.${SRC}-${TGT}.${SRC}; echo $DATA/train.${SRC}-${TGT}.${TGT}; done | tr "\n" ",")
echo "learning joint BPE over ${TRAIN_FILES}..."
python "$SPM_TRAIN" \
--input=$TRAIN_FILES \
--model_prefix=$DATA/sentencepiece.bpe \
--vocab_size=$BPESIZE \
--character_coverage=1.0 \
--model_type=bpe
# encode train/valid
echo "encoding train with learned BPE..."
for SRC in "${SRCS[@]}"; do
python "$SPM_ENCODE" \
--model "$DATA/sentencepiece.bpe.model" \
--output_format=piece \
--inputs $DATA/train.${SRC}-${TGT}.${SRC} $DATA/train.${SRC}-${TGT}.${TGT} \
--outputs $DATA/train.bpe.${SRC}-${TGT}.${SRC} $DATA/train.bpe.${SRC}-${TGT}.${TGT} \
--min-len $TRAIN_MINLEN --max-len $TRAIN_MAXLEN
done
echo "encoding valid with learned BPE..."
for ((i=0;i<${#SRCS[@]};++i)); do
SRC=${SRCS[i]}
VALID_SET=(${VALID_SETS[i]})
for ((j=0;j<${#VALID_SET[@]};++j)); do
python "$SPM_ENCODE" \
--model "$DATA/sentencepiece.bpe.model" \
--output_format=piece \
--inputs $DATA/valid${j}.${SRC}-${TGT}.${SRC} $DATA/valid${j}.${SRC}-${TGT}.${TGT} \
--outputs $DATA/valid${j}.bpe.${SRC}-${TGT}.${SRC} $DATA/valid${j}.bpe.${SRC}-${TGT}.${TGT}
done
done
--- end of COCO-LM/fairseq/examples/translation/prepare-iwslt17-multilingual.sh ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
def _normalize_spaces(line):
return " ".join(line.split())
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", required=True, type=str)
parser.add_argument("-n", "--repeat_times", required=True, type=int)
parser.add_argument("-o", "--output_file", required=False, type=str)
args = parser.parse_args()
stream = open(args.output_file, "w") if args.output_file else sys.stdout
for line in open(args.input_file):
for _ in range(args.repeat_times):
stream.write(_normalize_spaces(line) + "\n")
if __name__ == "__main__":
main()
--- end of COCO-LM/fairseq/examples/unsupervised_quality_estimation/repeat_lines.py ---
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to create letter (.ltr) and word (.wrd) label files for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
if __name__ == "__main__":
main()
--- end of COCO-LM/fairseq/examples/wav2vec/libri_labels.py ---
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
*/
/*
Kernel implementation for blocking repeated n-grams.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <torch/extension.h>
#include <vector>
// Ban repeated ngrams of length = 'no_repeat_ngram_size'
__global__ void banRepeatedTokens(long* __restrict__ tokens,
float* __restrict__ lprobs,
int max_predict_len, int vocab_size,
int no_repeat_ngram_size) {
auto row = blockIdx.x;
auto col = threadIdx.x;
auto start = row * (max_predict_len) + col;
// Each thread compares ngram starting from
// thread index with final ngram starting from
// step - no_repeat_ngram_size +2
auto check_start_pos = blockDim.x;
auto lprob_start = row * vocab_size;
bool is_banned = true;
extern __shared__ long tokens_shm[];
tokens_shm[col] = tokens[start];
if (col == blockDim.x - 1) {
for (int i=1; i<no_repeat_ngram_size; i++){
if (col+i < max_predict_len){
tokens_shm[col + i] = tokens[start + i];
}
}
}
__syncthreads();
for (int k = 0; k < no_repeat_ngram_size - 1; k++) {
if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) {
is_banned = false;
}
}
if (is_banned == true) {
auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1];
lprobs[lprob_start + token_to_be_banned] = -INFINITY;
}
}
// Allocate blocks and threads based on
// batch size and sequence length and launch
// kernel
torch::Tensor ngram_repeat_block_cuda_forward(const torch::Tensor tokens,
torch::Tensor lprobs, int bsz,
int step, int beam_size,
int no_repeat_ngram_size) {
int threads = step - no_repeat_ngram_size + 2;
if (threads <= 0) return lprobs;
int max_predict_len = tokens.size(1);
int vocab_size = lprobs.size(1);
auto token_ptr = tokens.data_ptr<long>();
auto lprob_ptr = lprobs.data_ptr<float>();
int blocks = bsz * beam_size;
int shared_mem_size = (step + 1) * sizeof(long);
// Launching N blocks where N is number of samples in a batch (beams*bsz)
// Launching T threads where T is number of previous ngrams in a sample
// Allocating shared mem per block for faster access of input tokens since
// each token will be accessed N times to compare with current Ngram where
// N is Ngram size.
banRepeatedTokens<<<blocks, threads, shared_mem_size>>>(
token_ptr, lprob_ptr, max_predict_len, vocab_size, no_repeat_ngram_size);
return lprobs;
}
--- end of COCO-LM/fairseq/fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
logger = logging.getLogger(__name__)
@dataclass
class ModelCriterionConfig(FairseqDataclass):
loss_weights: Dict[str, float] = field(
default_factory=dict,
metadata={"help": "weights for the loss terms"},
)
log_keys: List[str] = field(
default_factory=list,
metadata={"help": "additional output keys to log"},
)
@register_criterion("model", dataclass=ModelCriterionConfig)
class ModelCriterion(FairseqCriterion):
"""
This criterion relies on the model to supply losses.
The losses should be a dictionary of name -> scalar returned by
the model either by including it in the net_output dict or by
implementing a get_losses(net_output, sample) method. The final loss is
a scaled sum of all losses according to weights in loss_weights.
If no weights are provided, then all losses are scaled by 1.0.
The losses will be automatically logged. Additional keys from
net_output dict can be logged via the log_keys parameter.
"""
def __init__(self, task, loss_weights=None, log_keys=None):
super().__init__(task)
self.loss_weights = loss_weights
self.log_keys = log_keys
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
sample_size = net_output["sample_size"]
scaled_losses = {}
if hasattr(model, "get_losses"):
losses = model.get_losses(net_output, sample)
elif isinstance(net_output, dict) and "losses" in net_output:
losses = net_output["losses"]
else:
raise Exception("Could not retrieve losses")
for lk, p in losses.items():
try:
coef = 1.0 if len(self.loss_weights) == 0 else self.loss_weights[lk]
except KeyError:
logger.error(
f"weight for loss {lk} is not in loss_weights ({self.loss_weights})"
)
raise
if coef != 0 and p is not None:
scaled_losses[lk] = coef * p.float()
loss = sum(scaled_losses.values())
if reduce and loss.numel() > 1:
loss = loss.sum()
logging_output = {
"loss": loss.data,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
"_world_size": 1,
}
for lk in self.log_keys:
if lk in net_output and net_output[lk] is not None:
logging_output[lk] = float(net_output[lk])
if len(scaled_losses) > 1:
for lk, l in scaled_losses.items():
logging_output[f"loss_{lk}"] = l.item()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"_world_size",
}
world_size = utils.item(
sum(log.get("_world_size", 0) for log in logging_outputs)
)
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss_"):
metrics.log_scalar(k, val / sample_size, sample_size, round=3)
else:
metrics.log_scalar(k, val / world_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
--- end of COCO-LM/fairseq/fairseq/criterions/model_criterion.py ---
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import os.path as op
import re
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import get_fbank, get_waveform
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
logger = logging.getLogger(__name__)
class S2TDataConfig(object):
"""Wrapper class for data config YAML"""
def __init__(self, yaml_path):
try:
import yaml
except ImportError:
print("Please install PyYAML to load YAML files for " "S2T data config")
self.config = {}
if op.isfile(yaml_path):
try:
with open(yaml_path) as f:
self.config = yaml.load(f, Loader=yaml.FullLoader)
except Exception as e:
logger.info(f"Failed to load config from {yaml_path}: {e}")
else:
logger.info(f"Cannot find {yaml_path}")
@property
def vocab_filename(self):
"""fairseq vocabulary file under data root"""
return self.config.get("vocab_filename", "dict.txt")
@property
def shuffle(self) -> bool:
"""Shuffle dataset samples before batching"""
return self.config.get("shuffle", False)
@property
def pre_tokenizer(self) -> Dict:
"""Pre-tokenizer to apply before subword tokenization. Returning
a dictionary with `tokenizer` providing the tokenizer name and
the other items providing the tokenizer-specific arguments.
Tokenizers are defined in `fairseq.data.encoders.*`"""
return self.config.get("pre_tokenizer", {"tokenizer": None})
@property
def bpe_tokenizer(self) -> Dict:
"""Subword tokenizer to apply after pre-tokenization. Returning
a dictionary with `bpe` providing the tokenizer name and
the other items providing the tokenizer-specific arguments.
Tokenizers are defined in `fairseq.data.encoders.*`"""
return self.config.get("bpe_tokenizer", {"bpe": None})
@property
def prepend_tgt_lang_tag(self) -> bool:
"""Prepend target lang ID token as the target BOS (e.g. for to-many
multilingual setting). During inference, this requires `--prefix-size 1`
to force BOS to be lang ID token."""
return self.config.get("prepend_tgt_lang_tag", False)
@property
def input_feat_per_channel(self):
"""The dimension of input features (per audio channel)"""
return self.config.get("input_feat_per_channel", 80)
@property
def input_channels(self):
"""The number of channels in the input audio"""
return self.config.get("input_channels", 1)
@property
def sampling_alpha(self):
"""Hyper-parameter alpha = 1/T for temperature-based resampling.
(alpha = 1 for no resampling)"""
return self.config.get("sampling_alpha", 1.0)
@property
def use_audio_input(self):
"""Needed by the dataset loader to see if the model requires
raw audio as inputs."""
return self.config.get("use_audio_input", False)
@property
def audio_root(self):
"""Audio paths in the manifest TSV can be relative and this provides
the root path. Set this to empty string when using absolute paths."""
return self.config.get("audio_root", "")
def get_feature_transforms(self, split, is_train):
"""Split-specific feature transforms. Allowing train set wildcard `_train`,
evaluation set wildcard `_eval` and general wildcard `*` for matching."""
from copy import deepcopy
cfg = deepcopy(self.config)
_cur = cfg.get("transforms", {})
cur = _cur.get(split)
cur = _cur.get("_train") if cur is None and is_train else cur
cur = _cur.get("_eval") if cur is None and not is_train else cur
cur = _cur.get("*") if cur is None else cur
cfg["transforms"] = cur
return cfg
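# The helpers below sniff a payload's type from its leading magic bytes:
#   .npy files start with b"\x93NUMPY" -> bytes 147 ("\x93") and 78 ("N")
#   FLAC files start with b"fLaC"      -> bytes 102 ("f") and 76 ("L")
#   WAV (RIFF) files start with b"RIFF" -> bytes 82 ("R") and 73 ("I")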
def is_npy_data(data: bytes) -> bool:
return data[0] == 147 and data[1] == 78
def is_flac_or_wav_data(data: bytes) -> bool:
is_flac = data[0] == 102 and data[1] == 76
is_wav = data[0] == 82 and data[1] == 73
return is_flac or is_wav
def read_from_uncompressed_zip(file_path, offset, file_size) -> bytes:
with open(file_path, "rb") as f:
f.seek(offset)
data = f.read(file_size)
return data
def get_features_from_npy_or_audio(path):
ext = op.splitext(op.basename(path))[1]
if ext not in {".npy", ".flac", ".wav"}:
raise ValueError(f'Unsupported file format for "{path}"')
return np.load(path) if ext == ".npy" else get_fbank(path)
def get_features_or_waveform_from_uncompressed_zip(
path, byte_offset, byte_size, need_waveform=False
):
assert path.endswith(".zip")
data = read_from_uncompressed_zip(path, byte_offset, byte_size)
f = io.BytesIO(data)
if is_npy_data(data):
features_or_waveform = np.load(f)
elif is_flac_or_wav_data(data):
features_or_waveform = \
get_waveform(f, always_2d=False)[0] if need_waveform else get_fbank(f)
else:
raise ValueError(f'Unknown file format for "{path}"')
return features_or_waveform
def get_features_or_waveform(path: str, need_waveform=False):
"""Get speech features from .npy file or waveform from .wav/.flac file.
The file may be inside an uncompressed ZIP file and is accessed via byte
offset and length.
Args:
path (str): File path in the format of "<.npy/.wav/.flac path>" or
"<zip path>:<byte offset>:<byte length>".
need_waveform (bool): return waveform instead of features.
Returns:
features_or_waveform (numpy.ndarray): speech features or waveform.
"""
_path, *extra = path.split(":")
if not op.exists(_path):
raise FileNotFoundError(f"File not found: {_path}")
if len(extra) == 0:
if need_waveform:
return get_waveform(_path, always_2d=False)
return get_features_from_npy_or_audio(_path)
elif len(extra) == 2:
extra = [int(i) for i in extra]
features_or_waveform = get_features_or_waveform_from_uncompressed_zip(
_path, extra[0], extra[1], need_waveform=need_waveform
)
else:
raise ValueError(f"Invalid path: {path}")
return features_or_waveform
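# Usage sketch (illustrative paths, not part of the original file):
#   feats = get_features_or_waveform("fbank/utt1.npy")  # typically a (num_frames, feat_dim) array
#   wave = get_features_or_waveform("clips.zip:1024:20480", need_waveform=True)
# The second form reads 20480 bytes at offset 1024 from an uncompressed ZIP and
# decodes them as FLAC/WAV, returning the raw waveform instead of filterbank features.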
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
) -> torch.Tensor:
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
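# Example: feature matrices of shapes (7, 80) and (5, 80) are collated into a
# zero-padded tensor of shape (2, 7, 80); with is_audio_input=True, 1-D waveforms
# of lengths 7 and 5 would instead be padded into a tensor of shape (2, 7).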
class SpeechToTextDataset(FairseqDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
self,
split: str,
is_train_split: bool,
data_cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
):
self.split, self.is_train_split = split, is_train_split
self.data_cfg = data_cfg
self.audio_paths, self.n_frames = audio_paths, n_frames
self.n_samples = len(audio_paths)
assert len(n_frames) == self.n_samples > 0
assert src_texts is None or len(src_texts) == self.n_samples
assert tgt_texts is None or len(tgt_texts) == self.n_samples
assert speakers is None or len(speakers) == self.n_samples
assert src_langs is None or len(src_langs) == self.n_samples
assert tgt_langs is None or len(tgt_langs) == self.n_samples
assert ids is None or len(ids) == self.n_samples
assert (tgt_dict is None and tgt_texts is None) or (
tgt_dict is not None and tgt_texts is not None
)
self.src_texts, self.tgt_texts = src_texts, tgt_texts
self.src_langs, self.tgt_langs = src_langs, tgt_langs
self.tgt_dict = tgt_dict
self.check_tgt_lang_tag()
self.ids = ids
self.shuffle = data_cfg.shuffle if is_train_split else False
self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
self.data_cfg.get_feature_transforms(split, is_train_split)
)
self.pre_tokenizer = pre_tokenizer
self.bpe_tokenizer = bpe_tokenizer
logger.info(self.__repr__())
def __repr__(self):
return (
self.__class__.__name__
+ f'(split="{self.split}", n_samples={self.n_samples}, '
f"prepend_tgt_lang_tag={self.data_cfg.prepend_tgt_lang_tag}, "
f"shuffle={self.shuffle}, transforms={self.feature_transforms})"
)
@classmethod
def is_lang_tag(cls, token):
pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)")
return re.match(pattern, token)
def check_tgt_lang_tag(self):
if self.data_cfg.prepend_tgt_lang_tag:
assert self.tgt_langs is not None and self.tgt_dict is not None
tgt_lang_tags = [
self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs)
]
assert all(t in self.tgt_dict for t in tgt_lang_tags)
def tokenize_text(self, text: str):
if self.pre_tokenizer is not None:
text = self.pre_tokenizer.encode(text)
if self.bpe_tokenizer is not None:
text = self.bpe_tokenizer.encode(text)
return text
def __getitem__(
self, index: int
) -> Tuple[int, torch.Tensor, Optional[torch.Tensor]]:
source = get_features_or_waveform(
self.audio_paths[index], need_waveform=self.data_cfg.use_audio_input
)
if self.feature_transforms is not None:
assert not self.data_cfg.use_audio_input
source = self.feature_transforms(source)
source = torch.from_numpy(source).float()
target = None
if self.tgt_texts is not None:
tokenized = self.tokenize_text(self.tgt_texts[index])
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=True
).long()
if self.data_cfg.prepend_tgt_lang_tag:
lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])
lang_tag_idx = self.tgt_dict.index(lang_tag)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
return index, source, target
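    # Note: with prepend_tgt_lang_tag enabled, the target returned above is
    # [<lang:tgt>] + subword token ids + [eos]; otherwise it is subword ids + [eos].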
def __len__(self):
return self.n_samples
def collater(self, samples: List[Tuple[int, torch.Tensor, torch.Tensor]]) -> Dict:
if len(samples) == 0:
return {}
indices = torch.tensor([i for i, _, _ in samples], dtype=torch.long)
frames = _collate_frames(
[s for _, s, _ in samples], self.data_cfg.use_audio_input
)
# sort samples by descending number of frames
n_frames = torch.tensor([s.size(0) for _, s, _ in samples], dtype=torch.long)
n_frames, order = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
target, target_lengths = None, None
prev_output_tokens = None
ntokens = None
if self.tgt_texts is not None:
target = fairseq_data_utils.collate_tokens(
[t for _, _, t in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, order)
target_lengths = torch.tensor(
[t.size(0) for _, _, t in samples], dtype=torch.long
).index_select(0, order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[t for _, _, t in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, order)
ntokens = sum(t.size(0) for _, _, t in samples)
out = {
"id": indices,
"net_input": {
"src_tokens": frames,
"src_lengths": n_frames,
"prev_output_tokens": prev_output_tokens,
},
"target": target,
"target_lengths": target_lengths,
"ntokens": ntokens,
"nsentences": len(samples),
}
return out
def num_tokens(self, index):
return self.n_frames[index]
def size(self, index):
t_len = 0
if self.tgt_texts is not None:
tokenized = self.tokenize_text(self.tgt_texts[index])
t_len = len(tokenized.split(" "))
return self.n_frames[index], t_len
@property
def sizes(self):
return np.array(self.n_frames)
@property
def can_reuse_epoch_itr_across_epochs(self):
return True
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
# first by descending order of # of frames then by original/random order
order.append([-n for n in self.n_frames])
return np.lexsort(order)
def prefetch(self, indices):
        raise NotImplementedError  # prefetching is not supported by this dataset
class SpeechToTextDatasetCreator(object):
# mandatory columns
KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
KEY_TGT_TEXT = "tgt_text"
# optional columns
KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
# default values
DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[List[Dict]],
data_cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
) -> SpeechToTextDataset:
audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []
speakers, src_langs, tgt_langs = [], [], []
for s in samples:
ids.extend([ss[cls.KEY_ID] for ss in s])
audio_paths.extend(
[op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s]
)
n_frames.extend([int(ss[cls.KEY_N_FRAMES]) for ss in s])
tgt_texts.extend([ss[cls.KEY_TGT_TEXT] for ss in s])
src_texts.extend(
[ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s]
)
speakers.extend([ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s])
src_langs.extend([ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s])
tgt_langs.extend([ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s])
return SpeechToTextDataset(
split_name,
is_train_split,
data_cfg,
audio_paths,
n_frames,
src_texts,
tgt_texts,
speakers,
src_langs,
tgt_langs,
ids,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
)
@classmethod
def _get_size_ratios(cls, ids: List[str], sizes: List[int], alpha: float = 1.0):
"""Size ratios for temperature-based sampling
(https://arxiv.org/abs/1907.05019)"""
_sizes = np.array(sizes)
prob = _sizes / _sizes.sum()
smoothed_prob = prob ** alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
size_ratio = (smoothed_prob * _sizes.sum()) / _sizes
o_str = str({_i: f"{prob[i]:.3f}" for i, _i in enumerate(ids)})
logger.info(f"original sampling probability: {o_str}")
p_str = str({_i: f"{smoothed_prob[i]:.3f}" for i, _i in enumerate(ids)})
logger.info(f"balanced sampling probability: {p_str}")
sr_str = str({_id: f"{size_ratio[i]:.3f}" for i, _id in enumerate(ids)})
logger.info(f"balanced sampling size ratio: {sr_str}")
return size_ratio.tolist()
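    # Worked example (hypothetical sizes): ids=["a", "b"], sizes=[100, 900], alpha=0.5
    #   prob          = [0.1, 0.9]
    #   smoothed_prob = prob ** 0.5, renormalized     -> [0.25, 0.75]
    #   size_ratio    = smoothed_prob * 1000 / sizes  -> [2.5, 0.833]
    # i.e. the small split is upsampled 2.5x and the large one downsampled to ~83%.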
@classmethod
def from_tsv(
cls,
root: str,
data_cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
) -> SpeechToTextDataset:
samples = []
_splits = splits.split(",")
for split in _splits:
tsv_path = op.join(root, f"{split}.tsv")
if not op.isfile(tsv_path):
raise FileNotFoundError(f"Dataset not found: {tsv_path}")
with open(tsv_path) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
samples.append([dict(e) for e in reader])
assert len(samples) > 0
datasets = [
cls._from_list(
name,
is_train_split,
[s],
data_cfg,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
)
for name, s in zip(_splits, samples)
]
if is_train_split and len(_splits) > 1 and data_cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls._get_size_ratios(
_splits, [len(s) for s in samples], alpha=data_cfg.sampling_alpha
)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for d, r in zip(datasets, size_ratios)
]
return ConcatDataset(datasets)
|
COCO-LM/fairseq/fairseq/data/audio/speech_to_text_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/audio/speech_to_text_dataset.py",
"repo_id": "COCO-LM",
"token_count": 9088
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_bpe
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
@register_bpe("characters")
class Characters(object):
def __init__(self, *unused):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
escaped = x.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
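# Round-trip example: encode("ab cd") -> "a b ▁ c d" (the space is escaped as
# U+2581 before splitting into characters), and decode("a b ▁ c d") -> "ab cd".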
|
COCO-LM/fairseq/fairseq/data/encoders/characters.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/characters.py",
"repo_id": "COCO-LM",
"token_count": 264
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, "n", 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
"Mismatch between actual and expected iterable length. "
"This may be caused by resuming training from a checkpoint using "
"a different number of GPUs, in which case you can try the "
"--reset-dataloader option. Alternatively you may have a train or "
"validation set that is smaller than the number of GPUs. If none "
"of these apply, please report this to the fairseq developers."
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
        # This is to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
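# Usage sketch: wrapping an iterable tracks how much of it has been consumed, e.g.
#   it = CountingIterator(range(3)); next(it); next(it)
#   # now it.n == 2 and it.has_next() is True; after one more next(), has_next() is False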
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus (bool, optional): ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
set_dataset_epoch (bool, optional): update the wrapped Dataset with
the new epoch number (default: True).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
@property
def first_batch(self):
return "DUMMY"
class StreamingEpochBatchIterator(EpochBatchIterating):
"""A steaming-style iterator over a :class:`torch.utils.data.IterableDataset`.
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
max_sentences: batch size
collate_fn (callable): merges a list of samples to form a mini-batch
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speed up data loading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative (default: ``0``).
"""
def __init__(
self,
dataset,
max_sentences=1,
collate_fn=None,
epoch=1,
num_workers=0,
buffer_size=0,
timeout=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.max_sentences = max_sentences
self.collate_fn = collate_fn
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self._current_epoch_iterator = None
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
self.epoch = self.next_epoch_idx
if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
"epoch": self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):
if self.num_workers > 0:
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
# Create data loader
worker_init_fn = getattr(self.dataset, "worker_init_fn", None)
itr = torch.utils.data.DataLoader(
self.dataset,
batch_size=self.max_sentences,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
timeout=self.timeout,
worker_init_fn=worker_init_fn,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
            A callable batch_sampler is invoked once per epoch to build dynamic,
            per-epoch batch iterators.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speed up data loading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative (default: ``0``).
disable_shuffling (bool, optional): force disable shuffling
(default: ``False``).
"""
def __init__(
self,
dataset,
collate_fn,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
buffer_size=0,
timeout=0,
disable_shuffling=False,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = (
tuple(batch_sampler) if not callable(batch_sampler) else None
)
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.disable_shuffling = disable_shuffling
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = not disable_shuffling
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, "supports_prefetch", False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
@property
def first_batch(self):
if len(self.frozen_batches) == 0:
raise Exception(
"The dataset is empty. This could indicate "
"that all elements in the dataset have been skipped. "
"Try increasing the max number of allowed tokens or using "
"a larger dataset."
)
if getattr(self.dataset, "supports_fetch_outside_dataloader", True):
return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])
else:
return "DUMMY"
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus (bool, optional): ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
set_dataset_epoch (bool, optional): update the wrapped Dataset with
the new epoch number (default: True).
"""
if self.disable_shuffling:
shuffle = False
self.epoch = self.next_epoch_idx
if set_dataset_epoch and hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle,
fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
"version": 2,
"epoch": epoch,
"iterations_in_epoch": iter_in_epoch,
"shuffle": self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict["epoch"]
itr_pos = state_dict.get("iterations_in_epoch", 0)
version = state_dict.get("version", 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get("shuffle", True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
"Cannot resume training due to dataloader mismatch, please "
"report this to the fairseq developers. You can relaunch "
"training with `--reset-dataloader` and it should work."
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(
self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
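# Example: _chunk_iterator(iter(range(7)), 3) yields [0, 1, 2], [3, 4, 5], [6];
# GroupedIterator uses this to hand out `chunk_size` batches at a time
# (e.g. for gradient accumulation with --update-freq).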
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError("shard_id must be between 0 and num_shards")
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))),
total=sharded_len,
)
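# Example: ShardedIterator(list(range(10)), num_shards=3, shard_id=1) yields
# 1, 4, 7, None -- each shard is padded with `fill_value` to the same length
# (ceil(10 / 3) == 4) so that all workers see the same number of batches.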
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if (
self.warning_time is None
or time.time() - self.warning_time > 15 * 60
):
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
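# Usage sketch: BufferedIterator(size, itr) lazily starts a daemon BackgroundConsumer
# thread on the first __next__() call; the thread pre-fetches up to `size` items from
# `itr` into a queue so data loading overlaps with computation, and any exception it
# hits is re-raised in the consuming (training) thread.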
|
COCO-LM/fairseq/fairseq/data/iterators.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/iterators.py",
"repo_id": "COCO-LM",
"token_count": 10366
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import hashlib
import logging
import time
from bisect import bisect_right
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import List
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
from fairseq.distributed import utils as distributed_utils
def get_time_gap(s, e):
return (
datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
).__str__()
logger = logging.getLogger(__name__)
def default_virtual_size_func(datasets, ratios, max_scale_up=1.5):
sizes = [len(d) for d in datasets]
if ratios is None:
return sum(sizes)
largest_idx = np.argmax(sizes)
largest_r = ratios[largest_idx]
largest_s = sizes[largest_idx]
# set virtual sizes relative to the largest dataset
virtual_sizes = [(r / largest_r) * largest_s for r in ratios]
vsize = sum(virtual_sizes)
max_size = sum(sizes) * max_scale_up
return int(vsize if vsize < max_size else max_size)
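# Worked example (hypothetical): dataset sizes [100, 400] with ratios [1, 1] give
#   virtual_sizes = [(1 / 1) * 400, (1 / 1) * 400] -> vsize = 800
#   max_size      = (100 + 400) * 1.5 = 750        -> returns 750 (capped)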
class CollateFormat(Enum):
single = 1
ordered_dict = 2
class SampledMultiDataset(FairseqDataset):
"""Samples from multiple sub-datasets according to given sampling ratios.
Args:
datasets (
List[~torch.utils.data.Dataset]
or OrderedDict[str, ~torch.utils.data.Dataset]
): datasets
sampling_ratios (List[float]): list of probability of each dataset to be sampled
(default: None, which corresponds to concatenating all dataset together).
seed (int): RNG seed to use (default: 2).
epoch (int): starting epoch number (default: 1).
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
the collater to output batches of data mixed from all sub-datasets,
and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
of sub-datasets.
            Note that not all sub-datasets will be present in a single batch in both formats.
virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
        shared_collater (bool): whether or not all sub-datasets use the same collater.
shuffle (bool): whether or not to shuffle data (default: True).
"""
def __init__(
self,
datasets,
sampling_ratios=None,
seed=2,
epoch=1,
eval_key=None,
collate_format=CollateFormat.single,
virtual_size=default_virtual_size_func,
split="",
shared_collater=False,
shuffle=True,
):
super().__init__()
self.shared_collater = shared_collater
self.shuffle = shuffle
if isinstance(datasets, OrderedDict):
self.keys = list(datasets.keys())
datasets = list(datasets.values())
elif isinstance(datasets, List):
self.keys = list(range(len(datasets)))
else:
raise AssertionError()
self.datasets = datasets
self.split = split
self.eval_key = eval_key
if self.eval_key is not None:
self.collate_format = CollateFormat.single
else:
self.collate_format = collate_format
self.seed = seed
self._cur_epoch = None
self.cumulated_sizes = None
# self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset
# namely, data item i is sampled from the kth sub-dataset self.datasets[k]
# where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k]
self._cur_indices = None
self._sizes = None
self.virtual_size_per_dataset = None
# caching properties
self._reset_cached_properties()
self.setup_sampling(sampling_ratios, virtual_size)
self.set_epoch(epoch)
def _clean_if_not_none(self, var_list):
for v in var_list:
if v is not None:
del v
def _reset_cached_properties(self):
self._clean_if_not_none([self._sizes, self._cur_indices])
self._sizes = None
self._cur_indices = None
def setup_sampling(self, sample_ratios, virtual_size):
sizes = [len(d) for d in self.datasets]
if sample_ratios is None:
            # default back to concatenating the datasets
self.sample_ratios = None
self.virtual_size = sum(sizes)
else:
if not isinstance(sample_ratios, np.ndarray):
sample_ratios = np.array(sample_ratios)
self.sample_ratios = sample_ratios
virtual_size = (
default_virtual_size_func if virtual_size is None else virtual_size
)
self.virtual_size = (
virtual_size(self.datasets, self.sample_ratios)
if callable(virtual_size)
else virtual_size
)
def adjust_sampling(self, epoch, sampling_ratios, virtual_size):
if sampling_ratios is not None:
sampling_ratios = self._sync_sample_ratios(sampling_ratios)
self.setup_sampling(sampling_ratios, virtual_size)
def _sync_sample_ratios(self, ratios):
# in case the ratios are not precisely the same across processes
        # and to ensure every process updates the ratios at the same pace
ratios = torch.DoubleTensor(ratios)
if torch.distributed.is_initialized():
if torch.cuda.is_available():
distributed_utils.all_reduce(
ratios.cuda(), group=distributed_utils.get_data_parallel_group()
)
else:
distributed_utils.all_reduce(
ratios, group=distributed_utils.get_data_parallel_group()
)
ret = ratios.cpu()
ret = ret.numpy()
return ret
def random_choice_in_dataset(self, rng, dataset, choice_size):
if hasattr(dataset, "random_choice_in_dataset"):
return dataset.random_choice_in_dataset(rng, choice_size)
dataset_size = len(dataset)
return rng.choice(
dataset_size, choice_size, replace=(choice_size > dataset_size)
)
def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size):
def get_counts(sample_ratios):
counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64)
diff = virtual_size - counts.sum()
assert diff >= 0
# due to round-offs, the size might not match the desired sizes
if diff > 0:
dataset_indices = rng.choice(
len(sample_ratios), size=diff, p=sample_ratios
)
for i in dataset_indices:
counts[i] += 1
return counts
def get_in_dataset_indices(datasets, sizes, sample_ratios):
counts = get_counts(sample_ratios)
            # uniformly sample desired counts for each dataset
# if the desired counts are large, sample with replacement:
indices = [
self.random_choice_in_dataset(rng, d, c)
for c, d in zip(counts, datasets)
]
return indices
sizes = [len(d) for d in datasets]
if sample_ratios is None:
            # default back to concatenating the datasets
in_dataset_indices = [list(range(s)) for s in sizes]
virtual_sizes_per_dataset = sizes
else:
ratios = sample_ratios / sample_ratios.sum()
in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios)
virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices]
virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64)
cumulative_sizes = np.cumsum(virtual_sizes_per_dataset)
assert sum(virtual_sizes_per_dataset) == virtual_size
assert cumulative_sizes[-1] == virtual_size
if virtual_size < sum(sizes):
logger.warning(
f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})."
" If virtual size << real data size, there could be data coverage issue."
)
in_dataset_indices = np.hstack(in_dataset_indices)
return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset
def _get_dataset_and_index(self, index):
i = bisect_right(self.cumulated_sizes, index)
return i, self._cur_indices[index]
def __getitem__(self, index):
# self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]]
        # where k satisfies self.cumulated_sizes[k - 1] <= index < self.cumulated_sizes[k]
ds_idx, ds_sample_idx = self._get_dataset_and_index(index)
ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx])
return ret
def num_tokens(self, index):
return self.sizes[index].max()
def num_tokens_vec(self, indices):
sizes_vec = self.sizes[np.array(indices)]
# max across all dimensions but first one
return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape))))
def size(self, index):
return self.sizes[index]
def __len__(self):
return self.virtual_size
def collater(self, samples, **extra_args):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
if self.collate_format == "ordered_dict":
collect_samples = [[] for _ in range(len(self.datasets))]
for (i, sample) in samples:
collect_samples[i].append(sample)
batch = OrderedDict(
[
(self.keys[i], dataset.collater(collect_samples[i]))
for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))
if len(collect_samples[i]) > 0
]
)
elif self.shared_collater:
batch = self.datasets[0].collater([s for _, s in samples])
else:
samples_dict = defaultdict(list)
pad_to_length = (
defaultdict(int)
if "pad_to_length" not in extra_args
else extra_args["pad_to_length"]
)
for ds_idx, s in samples:
pad_to_length["source"] = max(
pad_to_length["source"], s["source"].size(0)
)
if s["target"] is not None:
pad_to_length["target"] = max(
pad_to_length["target"], s["target"].size(0)
)
samples_dict[ds_idx].append(s)
batches = [
self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)
for i in range(len(self.datasets))
if len(samples_dict[i]) > 0
]
def straight_data(tensors):
batch = torch.cat(tensors, dim=0)
return batch
src_lengths = straight_data(
[b["net_input"]["src_lengths"] for b in batches]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
def straight_order(tensors):
batch = straight_data(tensors)
return batch.index_select(0, sort_order)
batch = {
"id": straight_order([b["id"] for b in batches]),
"nsentences": sum(b["nsentences"] for b in batches),
"ntokens": sum(b["ntokens"] for b in batches),
"net_input": {
"src_tokens": straight_order(
[b["net_input"]["src_tokens"] for b in batches]
),
"src_lengths": src_lengths,
},
"target": straight_order([b["target"] for b in batches])
if batches[0]["target"] is not None
else None,
}
if "prev_output_tokens" in batches[0]["net_input"]:
batch["net_input"]["prev_output_tokens"] = straight_order(
[b["net_input"]["prev_output_tokens"] for b in batches]
)
if "src_lang_id" in batches[0]["net_input"]:
batch["net_input"]["src_lang_id"] = straight_order(
[b["net_input"]["src_lang_id"] for b in batches]
)
if "tgt_lang_id" in batches[0]:
batch["tgt_lang_id"] = straight_order(
[b["tgt_lang_id"] for b in batches]
)
return batch
@property
def sizes(self):
if self._sizes is not None:
return self._sizes
start_time = time.time()
in_sub_dataset_indices = [
self._cur_indices[
0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]
]
for i in range(len(self.datasets))
]
sub_dataset_sizes = [
d.sizes[indices]
for d, indices in zip(self.datasets, in_sub_dataset_indices)
]
self._sizes = np.vstack(sub_dataset_sizes)
logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}")
return self._sizes
def ordered_indices(self):
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")]
return sort_indices
def prefetch(self, indices):
prefetch_indices = [[] for _ in range(len(self.datasets))]
for i in indices:
ds_idx, ds_sample_idx = self._get_dataset_and_index(i)
prefetch_indices[ds_idx].append(ds_sample_idx)
for i in range(len(prefetch_indices)):
self.datasets[i].prefetch(prefetch_indices[i])
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch):
super().set_epoch(epoch)
if epoch == self._cur_epoch:
# re-enter so return
return
for d in self.datasets:
if hasattr(d, "set_epoch"):
d.set_epoch(epoch)
self._cur_epoch = epoch
self._establish_virtual_datasets()
def _establish_virtual_datasets(self):
if self.sample_ratios is None and self._cur_indices is not None:
            # not a sampling dataset, no need to resample if indices are already established
return
self._reset_cached_properties()
start_time = time.time()
# Generate a weighted sample of indices as a function of the
# random seed and the current epoch.
rng = np.random.RandomState(
[
int(
hashlib.sha1(
str(self.__class__.__name__).encode("utf-8")
).hexdigest(),
16,
)
% (2 ** 32),
self.seed % (2 ** 32), # global seed
self._cur_epoch, # epoch index,
]
)
self._clean_if_not_none(
[self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes]
)
self._sizes = None
indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices(
rng, self.datasets, self.sample_ratios, self.virtual_size
)
self._cur_indices = indices
self.cumulated_sizes = cumulated_sizes
self.virtual_size_per_dataset = virtual_size_per_dataset
raw_sizes = [len(d) for d in self.datasets]
sampled_sizes = self.virtual_size_per_dataset
logger.info(
f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; "
f"raw total size: {sum(raw_sizes)}"
)
logger.info(
f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; "
f"resampled total size: {sum(sampled_sizes)}"
)
if self.sample_ratios is not None:
logger.info(
f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}"
)
else:
logger.info(f"[{self.split}] A concat dataset")
logger.info(
f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}"
)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
sizes = self.sizes
tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
return data_utils.filter_paired_dataset_indices_by_size(
src_sizes, tgt_sizes, indices, max_sizes
)
|
COCO-LM/fairseq/fairseq/data/multilingual/sampled_multi_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multilingual/sampled_multi_dataset.py",
"repo_id": "COCO-LM",
"token_count": 8831
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Dict, Optional, Sequence
import numpy as np
from . import FairseqDataset, LanguagePairDataset
logger = logging.getLogger(__name__)
class RoundRobinZipDatasets(FairseqDataset):
"""Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.
Shorter datasets are repeated in a round-robin fashion to match the length
of the longest one.
Args:
datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of
:class:`~fairseq.data.FairseqDataset` instances.
eval_key (str, optional): a key used at evaluation time that causes
this instance to pass-through batches from *datasets[eval_key]*.
"""
def __init__(self, datasets, eval_key=None):
super().__init__()
if isinstance(datasets, dict):
datasets = OrderedDict(datasets)
assert isinstance(datasets, OrderedDict)
assert datasets, "Can't make a RoundRobinZipDatasets out of nothing"
for dataset in datasets.values():
assert isinstance(dataset, FairseqDataset)
self.datasets = datasets
self.eval_key = eval_key
self.longest_dataset_key = max(datasets, key=lambda k: len(datasets[k]))
self.longest_dataset = datasets[self.longest_dataset_key]
        self._ordered_indices: Optional[Dict[str, Sequence[int]]] = None
def _map_index(self, key, index):
assert (
self._ordered_indices is not None
), "Must call RoundRobinZipDatasets.ordered_indices() first"
o = self._ordered_indices[key]
return o[index % len(o)]
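    # Example (illustrative key): if datasets["de-en"] has 4 ordered indices,
    # _map_index("de-en", 6) returns its entry at position 6 % 4 == 2, i.e.
    # shorter datasets simply wrap around in round-robin fashion.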
def __getitem__(self, index):
if self.eval_key is None:
return OrderedDict(
[
(key, dataset[self._map_index(key, index)])
for key, dataset in self.datasets.items()
]
)
else:
# at evaluation time it's useful to pass-through batches from a single key
return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]
def __len__(self):
if self._ordered_indices is not None:
return len(self._ordered_indices[self.longest_dataset_key])
return len(self.longest_dataset)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch."""
if len(samples) == 0:
return None
if self.eval_key is None:
return OrderedDict(
[
(key, dataset.collater([sample[key] for sample in samples]))
for key, dataset in self.datasets.items()
]
)
else:
# at evaluation time it's useful to pass-through batches from a single key
return self.datasets[self.eval_key].collater(samples)
def num_tokens(self, index):
"""Return an example's length (number of tokens), used for batching."""
# TODO make it configurable whether to use max() or sum() here
return max(
dataset.num_tokens(self._map_index(key, index))
for key, dataset in self.datasets.items()
)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return {
key: dataset.size(self._map_index(key, index))
for key, dataset in self.datasets.items()
}
def ordered_indices(self):
"""Ordered indices for batching."""
if self._ordered_indices is None:
# Call the underlying dataset's ordered_indices() here, so that we
# get the same random ordering as we would have from using the
# underlying sub-datasets directly.
self._ordered_indices = OrderedDict(
[
(key, dataset.ordered_indices())
for key, dataset in self.datasets.items()
]
)
return np.arange(len(self))
def filter_indices_by_size(self, indices, max_positions=None):
"""
Filter each sub-dataset independently, then update the round robin to work
on the filtered sub-datasets.
"""
def _deep_until_language_pair(dataset):
if isinstance(dataset, LanguagePairDataset):
return dataset
if hasattr(dataset, "tgt_dataset"):
return _deep_until_language_pair(dataset.tgt_dataset)
if hasattr(dataset, "dataset"):
return _deep_until_language_pair(dataset.dataset)
raise Exception(f"Don't know how to unwrap this dataset: {dataset}")
if not isinstance(max_positions, dict):
max_positions = {k: max_positions for k in self.datasets.keys()}
ignored_some = False
for key, dataset in self.datasets.items():
dataset = _deep_until_language_pair(dataset)
self._ordered_indices[key], ignored = dataset.filter_indices_by_size(
self._ordered_indices[key], max_positions[key]
)
if len(ignored) > 0:
ignored_some = True
logger.warning(
f"{len(ignored)} samples from {key} have invalid sizes and will be skipped, "
f"max_positions={max_positions[key]}, first few sample ids={ignored[:10]}"
)
        # Since we are modifying _ordered_indices in place,
        # it is no longer possible to return valid ignored indices.
        # Hopefully the warning logged above is enough to debug the issue.
# Ideally we would receive ignore_invalid_inputs so that we could have
# a proper error message.
return (np.arange(len(self)), [0] if ignored_some else [])
@property
def supports_prefetch(self):
return all(
getattr(dataset, "supports_prefetch", False)
for dataset in self.datasets.values()
)
def prefetch(self, indices):
for key, dataset in self.datasets.items():
dataset.prefetch([self._map_index(key, index) for index in indices])
|
COCO-LM/fairseq/fairseq/data/round_robin_zip_datasets.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/round_robin_zip_datasets.py",
"repo_id": "COCO-LM",
"token_count": 2818
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from dataclasses import _MISSING_TYPE, dataclass, field
from typing import Any, List, Optional
import torch
from fairseq.dataclass.constants import (
DATASET_IMPL_CHOICES,
DDP_BACKEND_CHOICES,
GENERATION_CONSTRAINTS_CHOICES,
GENERATION_DECODING_FORMAT_CHOICES,
LOG_FORMAT_CHOICES,
PIPELINE_CHECKPOINT_CHOICES,
PRINT_ALIGNMENT_CHOICES,
ZERO_SHARDING_CHOICES,
)
from omegaconf import II, MISSING
@dataclass
class FairseqDataclass:
"""fairseq base dataclass that supported fetching attributes and metas"""
_name: Optional[str] = None
@staticmethod
def name():
return None
def _get_all_attributes(self) -> List[str]:
return [k for k in self.__dataclass_fields__.keys()]
def _get_meta(
self, attribute_name: str, meta: str, default: Optional[Any] = None
) -> Any:
return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)
def _get_name(self, attribute_name: str) -> str:
return self.__dataclass_fields__[attribute_name].name
def _get_default(self, attribute_name: str) -> Any:
if hasattr(self, attribute_name):
if str(getattr(self, attribute_name)).startswith("${"):
return str(getattr(self, attribute_name))
elif str(self.__dataclass_fields__[attribute_name].default).startswith(
"${"
):
return str(self.__dataclass_fields__[attribute_name].default)
elif (
getattr(self, attribute_name)
!= self.__dataclass_fields__[attribute_name].default
):
return getattr(self, attribute_name)
f = self.__dataclass_fields__[attribute_name]
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
def _get_type(self, attribute_name: str) -> Any:
return self.__dataclass_fields__[attribute_name].type
def _get_help(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "help")
def _get_argparse_const(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_const")
def _get_argparse_alias(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "argparse_alias")
def _get_choices(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "choices")
@dataclass
class CommonConfig(FairseqDataclass):
    # This is the core dataclass including common parameters shared by all different jobs.
    # Please append your params to other dataclasses if they are used for a particular
    # purpose or task, such as those dedicated to `distributed training`, `optimization`, etc.
no_progress_bar: bool = field(
default=False, metadata={"help": "disable progress bar"}
)
log_interval: int = field(
default=100,
metadata={
"help": "log progress every N batches (when progress bar is disabled)"
},
)
log_format: Optional[LOG_FORMAT_CHOICES] = field(
default=None, metadata={"help": "log format to use"}
)
tensorboard_logdir: Optional[str] = field(
default=None,
metadata={
"help": "path to save logs for tensorboard, should match --logdir "
"of running tensorboard (default: no tensorboard logging)"
},
)
wandb_project: Optional[str] = field(
default=None,
metadata={"help": "Weights and Biases project name to use for logging"},
)
azureml_logging: Optional[bool] = field(
default=False, metadata={"help": "Log scalars to AzureML context"},
)
seed: int = field(
default=1, metadata={"help": "pseudo random number generator seed"}
)
cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"})
tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"})
bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"})
memory_efficient_bf16: bool = field(
default=False,
metadata={
"help": "use a memory-efficient version of BF16 training; implies --bf16"
},
)
fp16: bool = field(default=False, metadata={"help": "use FP16"})
memory_efficient_fp16: bool = field(
default=False,
metadata={
"help": "use a memory-efficient version of FP16 training; implies --fp16"
},
)
fp16_no_flatten_grads: bool = field(
default=False, metadata={"help": "don't flatten FP16 grads tensor"}
)
fp16_init_scale: int = field(
default=2 ** 7, metadata={"help": "default FP16 loss scale"}
)
fp16_scale_window: Optional[int] = field(
default=None,
metadata={"help": "number of updates before increasing loss scale"},
)
fp16_scale_tolerance: float = field(
default=0.0,
metadata={
"help": "pct of updates that can overflow before decreasing the loss scale"
},
)
min_loss_scale: float = field(
default=1e-4,
metadata={"help": "minimum FP16 loss scale, after which training is stopped"},
)
threshold_loss_scale: Optional[float] = field(
default=None, metadata={"help": "threshold FP16 loss scale from below"}
)
user_dir: Optional[str] = field(
default=None,
metadata={
"help": "path to a python module containing custom extensions (tasks and/or architectures)"
},
)
empty_cache_freq: int = field(
default=0,
metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"},
)
all_gather_list_size: int = field(
default=16384,
metadata={"help": "number of bytes reserved for gathering stats from workers"},
)
model_parallel_size: int = field(
default=1, metadata={"help": "total number of GPUs to parallelize model over"}
)
quantization_config_path: Optional[str] = field(
default=None, metadata={"help": "path to quantization config file"}
)
profile: bool = field(
default=False, metadata={"help": "enable autograd profiler emit_nvtx"}
)
reset_logging: bool = field(
default=False,
metadata={
"help": "when using Hydra, reset the logging at the beginning of training"
},
)
suppress_crashes: bool = field(
default=False,
metadata={
"help": "suppress crashes when training with the hydra_train entry point so that the "
"main method can return a value (useful for sweeps)"
},
)
use_plasma_view: bool = field(
default=False, metadata={"help": "Store indices and sizes in shared memory"}
)
plasma_path: Optional[str] = field(
default="/tmp/plasma",
metadata={
"help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail."
},
)
@dataclass
class DistributedTrainingConfig(FairseqDataclass):
distributed_world_size: int = field(
default=max(1, torch.cuda.device_count()),
metadata={
"help": "total number of GPUs across all nodes (default: all visible GPUs)"
},
)
distributed_rank: Optional[int] = field(
default=0, metadata={"help": "rank of the current worker"}
)
distributed_backend: str = field(
default="nccl", metadata={"help": "distributed backend"}
)
distributed_init_method: Optional[str] = field(
default=None,
metadata={
"help": "typically tcp://hostname:port that will be used to "
"establish initial connetion"
},
)
distributed_port: int = field(
default=-1,
metadata={
"help": "port number (not required if using --distributed-init-method)"
},
)
device_id: int = field(
default=0,
metadata={
"help": "which GPU to use (usually configured automatically)",
"argparse_alias": "--local_rank",
},
)
distributed_no_spawn: bool = field(
default=False,
metadata={
"help": "do not spawn multiple processes even if multiple GPUs are visible"
},
)
ddp_backend: DDP_BACKEND_CHOICES = field(
default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"}
)
bucket_cap_mb: int = field(
default=25, metadata={"help": "bucket size for reduction"}
)
fix_batches_to_gpus: bool = field(
default=False,
metadata={
"help": "don't shuffle batches between GPUs; this reduces overall "
"randomness and may affect precision but avoids the cost of re-reading the data"
},
)
find_unused_parameters: bool = field(
default=False,
metadata={
"help": "disable unused parameter detection (not applicable to "
"--ddp-backend=legacy_ddp)"
},
)
fast_stat_sync: bool = field(
default=False,
metadata={"help": "[deprecated] this is now defined per Criterion"},
)
heartbeat_timeout: int = field(
default=-1,
metadata={
"help": "kill the job if no progress is made in N seconds; "
"set to -1 to disable"
},
)
broadcast_buffers: bool = field(
default=False,
metadata={
"help": "Copy non-trainable parameters between GPUs, such as "
"batchnorm population statistics"
},
)
slowmo_momentum: Optional[float] = field(
default=None,
metadata={
"help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, "
"0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs"
},
)
slowmo_algorithm: str = field(
default="LocalSGD", metadata={"help": "whether to use LocalSGD or SGP"}
)
localsgd_frequency: int = field(
default=3, metadata={"help": "Local SGD allreduce frequency"}
)
nprocs_per_node: int = field(
default=max(1, torch.cuda.device_count()),
metadata={
"help": "number of GPUs in each node. An allreduce operation across GPUs in "
"a node is very fast. Hence, we do allreduce across GPUs in a node, "
"and gossip across different nodes"
},
)
pipeline_model_parallel: bool = field(
default=False,
metadata={"help": "if set, use pipeline model parallelism across GPUs"},
)
pipeline_balance: Optional[str] = field(
default=None,
metadata={
"help": "partition the model into N_K pieces, where each piece "
"contains N_i layers. The sum(args.pipeline_balance) "
"should equal the total number of layers in the model"
},
)
pipeline_devices: Optional[str] = field(
default=None,
metadata={
"help": "a list of device indices indicating which device to place "
"each of the N_K partitions. The length of this list should "
"equal the length of the --pipeline-balance argument"
},
)
pipeline_chunks: Optional[int] = field(
default=0, metadata={"help": "microbatch count for pipeline model parallelism"}
)
pipeline_encoder_balance: Optional[str] = field(
default=None,
metadata={
"help": "partition the pipeline parallel encoder into N_K pieces, where each piece "
"contains N_i layers. The sum(args.pipeline_encoder_balance) "
"should equal the total number of encoder layers in the model"
},
)
pipeline_encoder_devices: Optional[str] = field(
default=None,
metadata={
"help": "a list of device indices indicating which device to place "
"each of the N_K partitions. The length of this list should "
"equal the length of the --pipeline-encoder-balance argument"
},
)
pipeline_decoder_balance: Optional[str] = field(
default=None,
metadata={
"help": "partition the pipeline parallel decoder into N_K pieces, where each piece "
"contains N_i layers. The sum(args.pipeline_decoder_balance) "
"should equal the total number of decoder layers in the model"
},
)
pipeline_decoder_devices: Optional[str] = field(
default=None,
metadata={
"help": "a list of device indices indicating which device to place "
"each of the N_K partitions. The length of this list should "
"equal the length of the --pipeline-decoder-balance argument"
},
)
pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field(
default="never",
metadata={"help": "checkpointing mode for pipeline model parallelism"},
)
zero_sharding: ZERO_SHARDING_CHOICES = field(
default="none", metadata={"help": "ZeRO sharding"}
)
fp16: bool = II("common.fp16")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
tpu: bool = II("common.tpu")
# configuration for --ddp-backend=fully_sharded
no_reshard_after_forward: bool = field(
default=False, metadata={"help": "don't reshard parameters after forward pass"},
)
fp32_reduce_scatter: bool = field(
default=False, metadata={"help": "reduce-scatter grads in FP32"},
)
cpu_offload: bool = field(
default=False, metadata={"help": "offload FP32 params to CPU"}
)
@dataclass
class DatasetConfig(FairseqDataclass):
num_workers: int = field(
default=1, metadata={"help": "how many subprocesses to use for data loading"}
)
skip_invalid_size_inputs_valid_test: bool = field(
default=False,
metadata={"help": "ignore too long or too short lines in valid and test set"},
)
max_tokens: Optional[int] = field(
default=None, metadata={"help": "maximum number of tokens in a batch"}
)
batch_size: Optional[int] = field(
default=None,
metadata={
"help": "number of examples in a batch",
"argparse_alias": "--max-sentences",
},
)
required_batch_size_multiple: int = field(
        default=8, metadata={"help": "batch size will be a multiple of this value"}
)
required_seq_len_multiple: int = field(
default=1,
metadata={
"help": "maximum sequence length in batch will be a multiplier of this value"
},
)
dataset_impl: Optional[DATASET_IMPL_CHOICES] = field(
default=None, metadata={"help": "output dataset implementation"}
)
data_buffer_size: int = field(
default=10, metadata={"help": "Number of batches to preload"}
)
train_subset: str = field(
default="train",
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
valid_subset: str = field(
default="valid",
metadata={
"help": "comma separated list of data subsets to use for validation"
" (e.g. train, valid, test)"
},
)
validate_interval: int = field(
default=1, metadata={"help": "validate every N epochs"}
)
validate_interval_updates: int = field(
default=0, metadata={"help": "validate every N updates"}
)
validate_after_updates: int = field(
default=0, metadata={"help": "dont validate until reaching this many updates"}
)
fixed_validation_seed: Optional[int] = field(
default=None, metadata={"help": "specified random seed for validation"}
)
disable_validation: bool = field(
default=False, metadata={"help": "disable validation"}
)
max_tokens_valid: Optional[int] = field(
default=II("dataset.max_tokens"),
metadata={
"help": "maximum number of tokens in a validation batch"
" (defaults to --max-tokens)"
},
)
batch_size_valid: Optional[int] = field(
default=II("dataset.batch_size"),
metadata={
"help": "batch size of the validation batch (defaults to --batch-size)",
"argparse_alias": "--max-sentences-valid",
},
)
    max_valid_steps: Optional[int] = field(
        default=None,
        metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"},
    )
curriculum: int = field(
default=0, metadata={"help": "don't shuffle batches for first N epochs"}
)
gen_subset: str = field(
default="test",
metadata={"help": "data subset to generate (train, valid, test)"},
)
num_shards: int = field(
default=1, metadata={"help": "shard generation over N shards"}
)
shard_id: int = field(
default=0, metadata={"help": "id of the shard to generate (id < num_shards)"}
)
@dataclass
class OptimizationConfig(FairseqDataclass):
max_epoch: int = field(
default=0, metadata={"help": "force stop training at specified epoch"}
)
max_update: int = field(
default=0, metadata={"help": "force stop training at specified update"}
)
stop_time_hours: float = field(
default=0,
metadata={
"help": "force stop training after specified cumulative time (if >0)"
},
)
clip_norm: float = field(
default=0.0, metadata={"help": "clip threshold of gradients"}
)
sentence_avg: bool = field(
default=False,
metadata={
"help": "normalize gradients by the number of sentences in a batch"
" (default is to normalize by number of tokens)"
},
)
update_freq: List[int] = field(
default_factory=lambda: [1],
metadata={"help": "update parameters every N_i batches, when in epoch i"},
)
lr: List[float] = field(
default_factory=lambda: [0.25],
metadata={
"help": "learning rate for the first N epochs; all epochs >N using LR_N"
" (note: this may be interpreted differently depending on --lr-scheduler)"
},
)
stop_min_lr: float = field(
default=-1.0,
metadata={"help": "stop training when the learning rate reaches this minimum"},
)
use_bmuf: bool = field(
default=False,
metadata={
"help": "specify global optimizer for syncing models on different GPUs/shards"
},
)
@dataclass
class CheckpointConfig(FairseqDataclass):
save_dir: str = field(
default="checkpoints", metadata={"help": "path to save checkpoints"}
)
tmp_save_dir: str = field(
default="./", metadata={"help": "temp path to save checkpoints, then copy to save_dir"}
)
restore_file: str = field(
default="checkpoint_last.pt",
metadata={
"help": "filename from which to load checkpoint "
"(default: <save-dir>/checkpoint_last.pt"
},
)
finetune_from_model: Optional[str] = field(
default=None,
metadata={
"help": "finetune from a pretrained model; note that meters and lr scheduler will be reset"
},
)
reset_dataloader: bool = field(
default=False,
metadata={
"help": "if set, does not reload dataloader state from the checkpoint"
},
)
reset_lr_scheduler: bool = field(
default=False,
metadata={
"help": "if set, does not load lr scheduler state from the checkpoint"
},
)
reset_meters: bool = field(
default=False,
metadata={"help": "if set, does not load meters from the checkpoint"},
)
reset_optimizer: bool = field(
default=False,
metadata={"help": "if set, does not load optimizer state from the checkpoint"},
)
optimizer_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override optimizer args when loading a checkpoint"
},
)
save_interval: int = field(
default=1, metadata={"help": "save a checkpoint every N epochs"}
)
save_interval_updates: int = field(
default=0, metadata={"help": "save a checkpoint (and validate) every N updates"}
)
keep_interval_updates: int = field(
default=-1,
metadata={
"help": "keep the last N checkpoints saved with --save-interval-updates"
},
)
keep_last_epochs: int = field(
default=-1, metadata={"help": "keep last N epoch checkpoints"}
)
keep_best_checkpoints: int = field(
default=-1, metadata={"help": "keep best N checkpoints based on scores"}
)
no_save: bool = field(
default=False, metadata={"help": "don't save models or checkpoints"}
)
no_epoch_checkpoints: bool = field(
default=False, metadata={"help": "only store last and best checkpoints"}
)
no_last_checkpoints: bool = field(
default=False, metadata={"help": "don't store last checkpoints"}
)
no_save_optimizer_state: bool = field(
default=False,
metadata={"help": "don't save optimizer-state as part of checkpoint"},
)
best_checkpoint_metric: str = field(
default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'}
)
maximize_best_checkpoint_metric: bool = field(
default=False,
metadata={
"help": 'select the largest metric value for saving "best" checkpoints'
},
)
patience: int = field(
default=-1,
metadata={
"help": (
"early stop training if valid performance doesn't "
"improve for N consecutive validation runs; note "
"that this is influenced by --validate-interval"
)
},
)
checkpoint_suffix: str = field(
default="", metadata={"help": "suffix to add to the checkpoint file name"}
)
checkpoint_shard_count: int = field(
default=1,
metadata={
"help": "Number of shards containing the checkpoint - "
"if the checkpoint is over 300GB, it is preferable "
"to split it into shards to prevent OOM on CPU while loading "
"the checkpoint"
},
)
load_checkpoint_on_all_dp_ranks: bool = field(
default=False,
metadata={
"help": "load checkpoints on all data parallel devices "
"(default: only load on rank 0 and broadcast to other devices)"
},
)
write_checkpoints_asynchronously: bool = field(
default=False,
metadata={
"help": (
"Write checkpoints asynchronously in a separate "
"thread. NOTE: This feature is currently being tested."
),
"argparse_alias": "--save-async",
},
)
model_parallel_size: int = II("common.model_parallel_size")
@dataclass
class FairseqBMUFConfig(FairseqDataclass):
block_lr: float = field(
default=1, metadata={"help": "block learning rate for bmuf"}
)
block_momentum: float = field(
default=0.875, metadata={"help": "block momentum for bmuf"}
)
global_sync_iter: int = field(
default=50, metadata={"help": "Iteration for syncing global model"}
)
warmup_iterations: int = field(
default=500, metadata={"help": "warmup iterations for model to broadcast"}
)
use_nbm: bool = field(
default=False,
metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"},
)
average_sync: bool = field(
default=False,
metadata={
"help": "Specify whether you want to average the local momentum after each sync"
},
)
distributed_world_size: int = II("distributed_training.distributed_world_size")
@dataclass
class GenerationConfig(FairseqDataclass):
beam: int = field(
default=5, metadata={"help": "beam size"},
)
nbest: int = field(
default=1, metadata={"help": "number of hypotheses to output"},
)
max_len_a: float = field(
default=0,
metadata={
"help": "generate sequences of maximum length ax + b, where x is the source length"
},
)
max_len_b: int = field(
default=200,
metadata={
"help": "generate sequences of maximum length ax + b, where x is the source length"
},
)
min_len: int = field(
default=1, metadata={"help": "minimum generation length"},
)
match_source_len: bool = field(
default=False, metadata={"help": "generations should match the source length"},
)
unnormalized: bool = field(
default=False, metadata={"help": "compare unnormalized hypothesis scores"},
)
no_early_stop: bool = field(
default=False, metadata={"help": "deprecated"},
)
no_beamable_mm: bool = field(
default=False, metadata={"help": "don't use BeamableMM in attention layers"},
)
lenpen: float = field(
default=1,
metadata={
"help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences"
},
)
unkpen: float = field(
default=0,
metadata={
"help": "unknown word penalty: <0 produces more unks, >0 produces fewer"
},
)
replace_unk: Optional[str] = field(
default=None,
metadata={
"help": "perform unknown replacement (optionally with alignment dictionary)",
"argparse_const": "@@ ",
},
)
sacrebleu: bool = field(
default=False, metadata={"help": "score with sacrebleu"},
)
score_reference: bool = field(
default=False, metadata={"help": "just score the reference translation"},
)
prefix_size: int = field(
default=0,
metadata={"help": "initialize generation by target prefix of given length"},
)
no_repeat_ngram_size: int = field(
default=0,
metadata={
"help": "ngram blocking such that this size ngram cannot be repeated in the generation"
},
)
sampling: bool = field(
default=False,
metadata={"help": "sample hypotheses instead of using beam search"},
)
sampling_topk: int = field(
default=-1,
metadata={"help": "sample from top K likely next words instead of all words"},
)
sampling_topp: float = field(
default=-1.0,
metadata={
"help": "sample from the smallest set whose cumulative probability mass exceeds p for next words"
},
)
constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field(
default=None,
metadata={
"help": "enables lexically constrained decoding",
"argparse_const": "ordered",
},
)
temperature: float = field(
default=1.0, metadata={"help": "temperature for generation"},
)
diverse_beam_groups: int = field(
default=-1, metadata={"help": "number of groups for Diverse Beam Search"},
)
diverse_beam_strength: float = field(
default=0.5,
metadata={"help": "strength of diversity penalty for Diverse Beam Search"},
)
diversity_rate: float = field(
default=-1.0,
metadata={"help": "strength of diversity penalty for Diverse Siblings Search"},
)
print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field(
default=None,
metadata={
"help": "if set, uses attention feedback to compute and print alignment to source tokens "
"(valid options are: hard, soft, otherwise treated as hard alignment)",
"argparse_const": "hard",
},
)
print_step: bool = field(
default=False, metadata={"help": "print steps"},
)
lm_path: Optional[str] = field(
default=None, metadata={"help": "path to lm checkpoint for lm fusion"},
)
lm_weight: float = field(
default=0.0, metadata={"help": "weight for lm probs for lm fusion"},
)
# arguments for iterative refinement generator
iter_decode_eos_penalty: float = field(
default=0.0,
metadata={"help": "if > 0.0, it penalized early-stopping in decoding."},
)
iter_decode_max_iter: int = field(
default=10, metadata={"help": "maximum iterations for iterative refinement."},
)
iter_decode_force_max_iter: bool = field(
default=False,
metadata={
"help": "if set, run exact the maximum number of iterations without early stop"
},
)
iter_decode_with_beam: int = field(
default=1,
metadata={
"help": "if > 1, model will generate translations varying by the lengths."
},
)
iter_decode_with_external_reranker: bool = field(
default=False,
metadata={
"help": "if set, the last checkpoint are assumed to be a reranker to rescore the translations"
},
)
retain_iter_history: bool = field(
default=False,
metadata={
"help": "if set, decoding returns the whole history of iterative refinement"
},
)
retain_dropout: bool = field(
default=False, metadata={"help": "Use dropout at inference time"},
)
# temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed
# retain_dropout_modules: Optional[List[str]] = field(
retain_dropout_modules: Any = field(
default=None,
metadata={
"help": "if set, only retain dropout for the specified modules; "
"if not set, then dropout will be retained for all modules"
},
)
# special decoding format for advanced decoding.
decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field(
default=None,
metadata={"help": "special decoding format for advanced decoding."},
)
no_seed_provided: bool = field(
default=False,
metadata={"help": "if set, dont use seed for initializing random generators"},
)
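# Worked example (added for illustration, not part of the original config): the
# generation length cap is max_len_a * src_len + max_len_b. With the defaults above
# (a=0, b=200) every hypothesis is capped at 200 tokens; with a=1.5 and b=5, a
# 20-token source allows up to 1.5 * 20 + 5 = 35 tokens.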
@dataclass
class CommonEvalConfig(FairseqDataclass):
path: Optional[str] = field(
default=None, metadata={"help": "path(s) to model file(s), colon separated"},
)
post_process: Optional[str] = field(
default=None,
metadata={
"help": (
"post-process text by removing BPE, letter segmentation, etc. "
"Valid options can be found in fairseq.data.utils.post_process."
),
"argparse_const": "subword_nmt",
"argparse_alias": "--remove-bpe",
},
)
quiet: bool = field(default=False, metadata={"help": "only print final scores"})
model_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override model args at generation that were used during model training"
},
)
results_path: Optional[str] = field(
default=None, metadata={"help": "path to save eval results (optional)"}
)
@dataclass
class EvalLMConfig(FairseqDataclass):
output_word_probs: bool = field(
default=False,
metadata={
"help": "if set, outputs words and their predicted log probabilities to standard output"
},
)
output_word_stats: bool = field(
default=False,
metadata={
"help": "if set, outputs word statistics such as word count, average probability, etc"
},
)
context_window: int = field(
default=0,
metadata={
"help": "ensures that every evaluated token has access to a context of at least this size, if possible"
},
)
softmax_batch: int = field(
default=sys.maxsize,
metadata={
"help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory"
},
)
@dataclass
class InteractiveConfig(FairseqDataclass):
buffer_size: int = field(
default=0,
metadata={
"help": "read this many sentences into a buffer before processing them"
},
)
input: str = field(
default="-", metadata={"help": "file to read from; use - for stdin"},
)
@dataclass
class FairseqConfig(FairseqDataclass):
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
optimization: OptimizationConfig = OptimizationConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
generation: GenerationConfig = GenerationConfig()
eval_lm: EvalLMConfig = EvalLMConfig()
interactive: InteractiveConfig = InteractiveConfig()
model: Any = MISSING
task: Any = None
criterion: Any = None
optimizer: Any = None
lr_scheduler: Any = None
scoring: Any = None
bpe: Any = None
tokenizer: Any = None
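# Usage sketch (added for illustration; assumes omegaconf is installed, as it already
# is for this module): the dataclasses above form a Hydra/OmegaConf structured config,
# and II("...") fields are interpolations resolved against the full config tree, e.g.
# dataset.max_tokens_valid falls back to dataset.max_tokens.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(FairseqConfig())
    cfg.dataset.max_tokens = 4096
    # the II("dataset.max_tokens") interpolation resolves lazily on access
    print(cfg.dataset.max_tokens_valid)  # -> 4096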
|
COCO-LM/fairseq/fairseq/dataclass/configs.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/dataclass/configs.py",
"repo_id": "COCO-LM",
"token_count": 13696
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import (
ModelParallelTransformerEncoderLayer,
ModelParallelTransformerDecoderLayer,
)
__all__ = [
"ModelParallelMultiheadAttention",
"ModelParallelTransformerEncoderLayer",
"ModelParallelTransformerDecoderLayer",
]
|
COCO-LM/fairseq/fairseq/model_parallel/modules/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/modules/__init__.py",
"repo_id": "COCO-LM",
"token_count": 157
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
@register_model("fconv_lm")
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-layers",
type=str,
metavar="EXPR",
help="decoder layers [(dim, kernel_size), ...]",
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--decoder-attention",
type=str,
metavar="EXPR",
help="decoder attention [True, ...]",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if hasattr(args, "max_target_positions") and not hasattr(
args, "tokens_per_sample"
):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.tokens_per_sample,
share_embed=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
adaptive_softmax_dropout=args.adaptive_softmax_dropout,
)
return FConvLanguageModel(decoder)
@register_model_architecture("fconv_lm", "fconv_lm")
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103")
def fconv_lm_dauphin_wikitext103(args):
layers = "[(850, 6)] * 3"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 5)] * 4"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 4)] * 3"
layers += " + [(1024, 4)] * 1"
layers += " + [(2048, 4)] * 1"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,20000,200000"
)
base_lm_architecture(args)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw")
def fconv_lm_dauphin_gbw(args):
layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
base_lm_architecture(args)
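# Usage sketch (added for illustration, not part of the original file): the
# --decoder-layers argument is a Python expression that build_model() expands with
# eval() into a list of (out_channels, kernel_size, ...) tuples, one per
# convolutional block.
if __name__ == "__main__":
    layers = eval("[(1268, 4)] * 13")  # the base_lm_architecture default
    assert len(layers) == 13 and layers[0] == (1268, 4)
    print(f"{len(layers)} conv blocks, first = {layers[0]}")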
|
COCO-LM/fairseq/fairseq/models/fconv_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/fconv_lm.py",
"repo_id": "COCO-LM",
"token_count": 2308
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding, TransformerDecoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .levenshtein_utils import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_get_del_targets,
_get_ins_targets,
_skip,
_skip_encoder_out,
)
@register_model("levenshtein_transformer")
class LevenshteinTransformerModel(FairseqNATModel):
@property
def allow_length_beam(self):
return False
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument(
"--early-exit",
default="6,6,6",
type=str,
help="number of decoder layers before word_del, mask_ins, word_ins",
)
parser.add_argument(
"--no-share-discriminator",
action="store_true",
help="separate parameters for discriminator",
)
parser.add_argument(
"--no-share-maskpredictor",
action="store_true",
help="separate parameters for mask-predictor",
)
parser.add_argument(
"--share-discriminator-maskpredictor",
action="store_true",
help="share the parameters for both mask-predictor and discriminator",
)
parser.add_argument(
"--sampling-for-deletion",
action="store_true",
help="instead of argmax, use sampling to predict the tokens",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
prev_output_tokens, tgt_tokens, self.pad, self.unk
)
mask_ins_targets = mask_ins_targets.clamp(min=0, max=255) # for safe prediction
mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
mask_ins_out, _ = self.decoder.forward_mask_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_out, _ = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=masked_tgt_tokens,
encoder_out=encoder_out,
)
# make online prediction
if self.decoder.sampling_for_deletion:
word_predictions = torch.multinomial(
F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1
).view(word_ins_out.size(0), -1)
else:
word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]
word_predictions.masked_scatter_(
~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]
)
# generate training labels for deletion
word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)
word_del_out, _ = self.decoder.forward_word_del(
normalize=False,
prev_output_tokens=word_predictions,
encoder_out=encoder_out,
)
word_del_masks = word_predictions.ne(self.pad)
return {
"mask_ins": {
"out": mask_ins_out,
"tgt": mask_ins_targets,
"mask": mask_ins_masks,
"ls": 0.01,
},
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": masked_tgt_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"word_del": {
"out": word_del_out,
"tgt": word_del_targets,
"mask": word_del_masks,
},
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
history = decoder_out.history
bsz = output_tokens.size(0)
if max_ratio is None:
max_lens = torch.zeros_like(output_tokens).fill_(255)
else:
if not encoder_out["encoder_padding_mask"]:
max_src_len = encoder_out["encoder_out"].size(0)
src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len)
else:
src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
# do not delete tokens if it is <s> </s>
can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip deletion entirely if no sentence can delete words
word_del_score, word_del_attn = self.decoder.forward_word_del(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_del_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word),
)
word_del_pred = word_del_score.max(-1)[1].bool()
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
mask_ins_score, _ = self.decoder.forward_mask_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_mask),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask),
)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty
mask_ins_pred = mask_ins_score.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
if history is not None:
history.append(output_tokens.clone())
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word),
)
word_ins_score, word_ins_pred = word_ins_score.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens):
initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens[:, 1] = self.eos
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out["encoder_out"][0])
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
)
class LevenshteinTransformerDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)
self.embed_word_del = Embedding(2, self.output_embed_dim, None)
# del_word, ins_mask, ins_word
self.early_exit = [int(i) for i in args.early_exit.split(",")]
assert len(self.early_exit) == 3
# copy layers for mask-predict/deletion
self.layers_msk = None
if getattr(args, "no_share_maskpredictor", False):
self.layers_msk = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[1])
]
)
self.layers_del = None
if getattr(args, "no_share_discriminator", False):
self.layers_del = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[0])
]
)
if getattr(args, "share_discriminator_maskpredictor", False):
assert getattr(
args, "no_share_discriminator", False
), "must set saperate discriminator"
self.layers_msk = self.layers_del
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
layers=None,
**unused
):
"""
        Similar to *forward* but only returns features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
        The LevenshteinTransformer decoder has full attention over all generated tokens.
"""
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
layers = self.layers if layers is None else layers
early_exit = len(layers) if early_exit is None else early_exit
for _, layer in enumerate(layers[:early_exit]):
x, attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
@ensemble_decoder
def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[1],
layers=self.layers_msk,
**unused
)
features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[2],
layers=self.layers,
**unused
)
decoder_out = self.output_layer(features)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[0],
layers=self.layers_del,
**unused
)
decoder_out = F.linear(features, self.embed_word_del.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@register_model_architecture("levenshtein_transformer", "levenshtein_transformer")
def levenshtein_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.early_exit = getattr(args, "early_exit", "6,6,6")
args.no_share_discriminator = getattr(args, "no_share_discriminator", False)
args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False)
args.share_discriminator_maskpredictor = getattr(
args, "share_discriminator_maskpredictor", False
)
args.no_share_last_layer = getattr(args, "no_share_last_layer", False)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de"
)
def levenshtein_transformer_wmt_en_de(args):
levenshtein_base_architecture(args)
# similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big"
)
def levenshtein_transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
levenshtein_base_architecture(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big"
)
def levenshtein_transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
levenshtein_transformer_vaswani_wmt_en_de_big(args)
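# Illustration (added sketch, not part of the original model code): forward_decoder
# applies the three edit operations in order -- word deletion, placeholder insertion,
# word insertion -- and, when max_ratio is given, caps hypothesis length at
# max_ratio * source_length with a floor of 10 tokens.
if __name__ == "__main__":
    src_lens = torch.tensor([4, 32])
    print((src_lens * 2.0).clamp(min=10).long().tolist())  # max_ratio=2.0 -> [10, 64]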
|
COCO-LM/fairseq/fairseq/models/nat/levenshtein_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/levenshtein_transformer.py",
"repo_id": "COCO-LM",
"token_count": 9868
}
| 187 |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import re
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoder,
)
from fairseq.models.speech_to_text.utils import (
NoOp,
lengths_to_padding_mask,
segments_to_sequence,
)
from fairseq.models.speech_to_text.utils import (
attention_suppression,
layer_norm_backward_hook,
)
from torch import Tensor, device as Device
from torch.quantization.qconfig import (
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
)
class RelativePositionEmbedding(nn.Module):
"""
Implementation according to https://arxiv.org/abs/1803.02155
"""
def __init__(self, head_dim, max_position, norm_init=True):
super().__init__()
self.head_dim = head_dim
self.max_position = max_position
self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim))
if norm_init:
nn.init.xavier_normal_(self.embeddings)
else:
nn.init.xavier_uniform_(self.embeddings)
def forward(self, input: Tensor):
output = nn.functional.embedding(input.long(), self.embeddings)
return output
class Fp32LayerNorm(nn.Module):
def __init__(
self,
input_dim,
clamp_grad=True,
max_grad_value=256,
eps=1e-5,
elementwise_affine=True,
):
super().__init__()
self.torch_module = torch.nn.LayerNorm(
input_dim, eps=eps, elementwise_affine=elementwise_affine
)
if clamp_grad:
hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value)
self.torch_module.register_backward_hook(hook)
def forward(self, input):
output = torch.nn.functional.layer_norm(
input.float(),
self.torch_module.normalized_shape,
self.torch_module.weight.float()
if self.torch_module.weight is not None
else None,
self.torch_module.bias.float()
if self.torch_module.bias is not None
else None,
self.torch_module.eps,
).type_as(input)
return output
# ------------------------------------------------------------------------------
# PositionwiseFF
# ------------------------------------------------------------------------------
class PositionwiseFF(nn.Module):
"""
FFN layer in transformer.
Args:
input_dim: input embedding dimension
ffn_dim: FFN layer inner dimension
dropout_on_fc1: dropout for first linear layer
        dropout_on_fc2: dropout for second linear layer
activation_fn: activation function used after first linear layer. \
Only relu or gelu is supported.
"""
def __init__(
self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn
):
super(PositionwiseFF, self).__init__()
self.input_dim = input_dim
self.ffn_dim = ffn_dim
if activation_fn == "relu":
ac = nn.ReLU()
elif activation_fn == "gelu":
ac = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(activation_fn))
# fc1 -> ac -> dropout -> fc2 -> dropout
self.module = nn.Sequential(
nn.Linear(input_dim, ffn_dim),
ac,
nn.Dropout(dropout_on_fc1),
nn.Linear(ffn_dim, input_dim),
nn.Dropout(dropout_on_fc2),
)
self.layer_norm = Fp32LayerNorm(input_dim)
def forward(self, input):
module_out = self.module(self.layer_norm(input))
output = module_out + input
return output
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
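# Usage note (added, illustrative): PositionwiseFF applies pre-LayerNorm and adds the
# residual outside self.module, so the output shape always matches the input, e.g.
#     ff = PositionwiseFF(512, 2048, 0.1, 0.1, "relu")
#     y = ff(torch.randn(10, 2, 512))   # -> (10, 2, 512)
# quantize_() dynamically quantizes only the two nn.Linear layers to int8.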
# ------------------------------------------------------------------------------
# SummarizationLayer
# ------------------------------------------------------------------------------
class SummarizationLayer(nn.Module):
def __init__(self, method, segment_size, embedding_dim):
super(SummarizationLayer, self).__init__()
self.segment_size = segment_size
self.embedding_dim = embedding_dim
nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method)
self.method = method
if method == "mean":
self.module = nn.AvgPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "max":
self.module = nn.MaxPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "linear":
self.module = nn.Linear(segment_size, 1)
elif nonlin_match:
nonlin_args = nonlin_match.groupdict()
act_type = nonlin_args["act"]
hid_dim = int(nonlin_args["dim"])
if act_type == "relu":
act = nn.ReLU()
elif act_type == "gelu":
act = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(act_type))
self.module = nn.Sequential(
nn.Linear(segment_size, hid_dim),
act,
nn.Linear(hid_dim, 1),
)
else:
raise ValueError("Unsupported summarization method = ({})".format(method))
def forward(self, input):
# T, B, D -> B, D, T
input = input.permute(1, 2, 0)
if self.method == "mean" or self.method == "max":
output = self.module(input)
output = output.permute(2, 0, 1)
return output
full_seg_length = input.size(2) // self.segment_size * self.segment_size
if full_seg_length > 0:
# at least one seg is full
B = input.size(0)
D = input.size(1)
input_todo = (
input[:, :, :full_seg_length]
.contiguous()
.view(B, -1, self.segment_size)
)
output = self.module(input_todo)
output = output.view(B, D, -1)
else:
output = input.new_zeros(input.size(0), input.size(1), 0)
left = input.size(2) - full_seg_length
if left > 0:
# when last seg is not full, use zeros as last memory placeholder
zeros = input.new_zeros(input.size(0), input.size(1), 1)
output = torch.cat([output, zeros], dim=2)
output = output.permute(2, 0, 1)
return output
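# Shape note (added, illustrative): the input is (T, B, D); every segment of
# segment_size frames is pooled to one vector, so with T=20 and segment_size=8 the
# output has ceil(20 / 8) = 3 summary frames and shape (3, B, D), regardless of the
# pooling method.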
# ------------------------------------------------------------------------------
# NoSegAugmentedMemoryMultiheadAttentionBmm
# ------------------------------------------------------------------------------
class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module):
"""
Whole utterance augmented memory multihead attention using BMM.
    Unlike the previous augmented memory multihead attention, where the utterance
    is chunked into segments, here we use an attention mask to achieve the same
    effect. The input embedding [right_context, utterance, summary] is a
    concatenation of right context, utterance and summary.
    The right context block is the concatenation of the right context of every
    segment: [right_context_0, right_context_1, ..., right_context_n]. For
    example, if the utterance is [v0, v1, v2, ..., v20], the segment size is 8 and
    the right context size is 4, then the right context block is
    [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8-v11 are the right
    context of the first segment, v16-v19 are the right context of the second
    segment, and the zeros are the right context of the last segment.
    utterance corresponds to the input embedding sequence.
    summary is the concatenation of the per-segment averages: [summary_0,
    summary_1, ...].
    In augmented memory multihead attention, the query is [right_context,
    utterance, summary] and the key is [memory, right_context, utterance]. Unlike
    AugmentedMemoryMultiheadAttentionBmm, memory here is passed from the previous
    attention layer; for the first attention layer, memory is the average of each
    segment.
    Memory is the concatenation of the memory of every segment in the previous
    attention layer. For example, if the current layer is i, memory is
    [m_0, m_1, ..., m_n], where each m_k is the output of seg_k in layer i-1.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
dropout: attention dropout
        std_scale: if std_scale is not None, weak attention suppression is
            turned on; for std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std are suppressed.
scaled_init: whether to use scaled init for linear weight
tanh_on_mem: whether to use tanh on memory output
use_mem: whether to use memory or not. When max_memory_size is 0, then
we don't have memory anymore.
layer_index: current self-attention layer index that is used in depth
initialization
max_relative_position: max relative position used in relative position
embedding
        rpe_old_option: kept for compatibility with previous models, which were
            trained with attention += attention + rpe; the correct equation is
            attention = attention + rpe
"""
def __init__(
self,
input_dim,
num_heads,
dropout=0.0,
std_scale=None,
scaled_init=False,
tanh_on_mem=False,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
max_relative_position=0,
rpe_old_option=True,
):
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
super().__init__()
embed_dim = input_dim
self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True)
self.rpe_old_option = rpe_old_option
if max_relative_position > 0:
self.use_rpe = True
self.rpe_k = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
self.rpe_v = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
else:
self.use_rpe = False
self.rpe_k = None
self.rpe_v = None
if scaled_init:
if layer_index == -1:
gain = 1.0 / math.sqrt(2)
else:
                # Depthwise initialization (https://arxiv.org/abs/2005.09684)
                # greatly stabilizes training; use it in place of the
                # incremental loss.
gain = 1.0 / math.sqrt(layer_index + 1)
torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain)
torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain)
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.std_scale = std_scale
self.use_mem = use_mem
self.mini_batches = mini_batches
self.negative_inf = negative_inf
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = NoOp()
self.nonlinear_squash_mem = False
def prepare_qkv(
self,
input: Tensor,
mems: Tensor,
lengths: Tensor,
summary_length: int,
lc_length: int,
):
# T: right_context length + utterance_length + summary_length
T, B, D = input.shape
mem_length = mems.size(0)
utterance_length = torch.max(lengths)
right_context_blocks_length = T - utterance_length - summary_length
rc_block = input[:right_context_blocks_length, :, :]
utterance_block = input[right_context_blocks_length : T - summary_length, :, :]
if B == 1:
padding_mask = None
else:
klengths = lengths + mem_length + right_context_blocks_length + lc_length
padding_mask = lengths_to_padding_mask(lengths=klengths)
mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0)
# In training lc_length = 0
key_length = mem_rc_input.size(0) + lc_length
rc_input_sum = input
q = self.e2h_q(rc_input_sum)
kv = self.e2h_kv(mem_rc_input)
k, v = kv.chunk(chunks=2, dim=2)
result_qkv = (q, k, v)
input_shape = (T, B, D)
result_lengths_info = (
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
)
if padding_mask is not None:
assert padding_mask.size(0) == B
assert padding_mask.size(1) == key_length
return result_qkv, input_shape, result_lengths_info, padding_mask
def prepare_attention_weights(
self,
q: Tensor,
new_k: Tensor,
new_v: Tensor,
input_shape: Tuple[int, int, int],
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
T, B, D = input_shape
q = (
q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1)
* self.scaling
)
k = (
new_k.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
new_v.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_k = self.rpe_k(rpe)
# [q, B*h, d] * [q, k, d] -> [B*h, q, k]
attention_weights_rpe = torch.matmul(
q.transpose(0, 1), r_k.transpose(1, 2)
).transpose(0, 1)
attention_weights = attention_weights + attention_weights_rpe
attention_weights_float = attention_weights.float()
return attention_weights, attention_weights_float, v
def prepare_attention_output(
self,
attention_weights: Tensor,
attention_weights_float: Tensor,
v: Tensor,
input_shape: Tuple[int, int, int],
key_length: int,
padding_mask: Optional[Tensor],
rpe: Optional[Tensor],
) -> Tensor:
T, B, D = input_shape
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, key_length
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, key_length
)
if self.std_scale is not None:
attention_weights_float = attention_suppression(
attention_weights_float, self.std_scale
)
attention_weights_float = torch.nn.functional.softmax(
attention_weights_float, dim=-1
)
attention_weights = attention_weights_float.type_as(attention_weights)
attention_probs = torch.nn.functional.dropout(
attention_weights, p=self.dropout, training=self.training
)
        # [B * n_head, T, key_length] x [B * n_head, key_length, d_head]
        # -> [B * n_head, T, d_head]
attention = torch.bmm(attention_probs, v)
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_v = self.rpe_v(rpe)
attention_rpe = torch.matmul(
attention_probs.transpose(0, 1), r_v
).transpose(0, 1)
if self.rpe_old_option:
attention += attention + attention_rpe
else:
attention = attention + attention_rpe
assert list(attention.shape) == [B * self.num_heads, T, self.head_dim]
attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim)
rc_output_memory = self.out_proj(attention)
return rc_output_memory
@torch.jit.unused
def forward(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
attention_mask: Tensor,
pre_mems: Optional[Tensor] = None,
left_context_key: Optional[Tensor] = None,
left_context_val: Optional[Tensor] = None,
rpe: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
attention_mask: attention mask for query = [right_context, query, summary]
                key = [mem, right_context, query]. This is only used for training.
"""
if self.use_mem:
mem_length = mems.size(0)
summary_length = mem_length + 1
if pre_mems is not None:
mems = torch.cat([pre_mems, mems], dim=0)
else:
mem_length = 0
summary_length = 0
# In training, lc_length = 0
if left_context_key is not None:
lc_length = left_context_key.size(0)
else:
lc_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
if left_context_key is not None:
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
else:
new_k = k
new_v = v
next_k = None
next_v = None
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# mask attention
attention_mask = attention_mask.unsqueeze(0)
attention_weights_float = attention_weights_float.masked_fill(
attention_mask, float(self.negative_inf)
)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
if self.use_mem:
            # next_m length equals summary length - 1;
            # the last memory is ignored
if self.mini_batches:
next_m = rc_output_memory[-summary_length:]
else:
next_m = rc_output_memory[-summary_length:-1]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-summary_length]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
next_m = mems
rc_output = rc_output_memory
return rc_output, next_m, next_k, next_v
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
            left_context_key: left context for the key part. This is only used for online
                decoding. In training, this is an empty tensor.
            left_context_val: left context for the value part. This is only used for online
                decoding. In training, this is an empty tensor.
"""
lc_length = left_context_key.size(0)
# In decoding, summary_length = 1 or 0
if self.use_mem:
summary_length = 1
else:
summary_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
        # In online decoding, we don't have an attention mask, but we still need
        # to disable the attention from the summary query to the memory
attention_weights_float[:, -1, :mem_length] = float(self.negative_inf)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
# In decoding, summary length is 1
if self.use_mem:
next_m = rc_output_memory[-1:]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-1]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
rc_output = rc_output_memory
# empty tensor as input mems
next_m = mems
return rc_output, next_m, next_k, next_v
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
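# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how the quantize_()
# hook above is typically exercised. The toy Sequential below is a stand-in
# for the attention module, and the call mirrors the one made inside
# quantize_() with the default (per-tensor) dynamic qconfig.
# ---------------------------------------------------------------------------
def _dynamic_quantization_sketch():
    import torch

    toy = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
    # swap every nn.Linear for its int8 dynamically quantized counterpart in place
    torch.quantization.quantize_dynamic(
        toy,
        {torch.nn.Linear: torch.quantization.default_dynamic_qconfig},
        dtype=torch.qint8,
        inplace=True,
    )
    # float activations go in; they are quantized on the fly while the stored
    # Linear weights are already int8
    return toy(torch.randn(2, 16))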
class NoSegAugmentedMemoryTransformer(nn.Module):
"""
Whole utterance augmented memory transformer.
    This is not a pyspeech nn layer. It is used as a module in a master layer where
    multiple transformers are used.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
dropout_in_attn=0.0,
dropout_on_attn=None,
dropout_on_fc1=None,
dropout_on_fc2=None,
activation_fn="relu",
tanh_on_mem=False,
std_scale=None,
scaled_init=False,
segment_size=128,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super(NoSegAugmentedMemoryTransformer, self).__init__()
self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout_in_attn,
scaled_init=scaled_init,
tanh_on_mem=tanh_on_mem,
std_scale=std_scale,
use_mem=use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
max_relative_position=max_relative_position,
)
self.dropout = nn.Dropout(dropout_on_attn)
self.pos_ff = PositionwiseFF(
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
activation_fn=activation_fn,
)
self.layer_norm_pre = Fp32LayerNorm(input_dim)
self.layer_norm = Fp32LayerNorm(input_dim)
self.segment_size = segment_size
self.use_mem = use_mem
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
def set_mini_batches(self, mini_batches):
self.attention.mini_batches = mini_batches
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def pre_attention_ops(self, input, right_context_blocks):
rc_length = right_context_blocks.size(0)
input_length = input.size(0)
rc_and_input = torch.cat([right_context_blocks, input], dim=0)
residual_input = rc_and_input
rc_and_input = self.layer_norm_pre(rc_and_input)
query_input = rc_and_input[-input_length:, :, :]
return rc_length, input_length, residual_input, query_input, rc_and_input
def after_attention_ops(self, attention_output, residual_input):
output = self.dropout(attention_output)
output = output + residual_input
output = self.pos_ff(output)
output = self.layer_norm(output)
return output
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
right_context_blocks: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
# In online decoding, the summary query size is always 1 or 0
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
summary_query = summary_query[0:1, :, :]
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention.forward_jit(
input=rc_qu_su,
lengths=lengths,
mems=mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
@torch.jit.unused
def forward(
self,
input,
lengths,
mems,
right_context_blocks,
attention_mask,
pre_mems,
left_context_key,
left_context_val,
rpe,
):
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention(
input=rc_qu_su,
lengths=lengths,
mems=mems,
attention_mask=attention_mask,
pre_mems=pre_mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
        # [TODO] Note memory did not go through pos_ff. What happens if we pass
        # the memory through the pos_ff as well?
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
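# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): what the "mean"
# summarization behind gen_summary_queries() computes. SummarizationLayer is
# assumed to reduce every segment_size-long chunk of a [T, B, D] input to one
# vector, giving [ceil(T / segment_size), B, D]; the helper below reproduces
# that reduction with plain torch ops for a quick sanity check.
# ---------------------------------------------------------------------------
def _mean_summary_sketch(x, segment_size):
    import math

    import torch

    T, B, D = x.shape
    num_segs = math.ceil(T / segment_size)
    summaries = []
    for i in range(num_segs):
        seg = x[i * segment_size : (i + 1) * segment_size]
        summaries.append(seg.mean(dim=0, keepdim=True))
    return torch.cat(summaries, dim=0)  # [num_segs, B, D]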
class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
"""
    Whole utterance augmented memory transformer encoder layer. This is a master layer
    where we can define multiple augmented memory transformers. There are two reasons
    to set up the master layer.
    1. We only need to define the attention mask once. All the layers in the master
       layer share the same mask.
    2. The pyspeech nn layer has a special input and output format. Defining one master
       layer makes it easier to pass memory between the different layers inside the
       master layer.
    args:
        input_dim: input embedding dimension
        num_heads: number of heads in multihead self-attention
        ffn_dim: ffn dimension in FFN layer
        num_layers: number of augmented memory transformer layers
        dropout_in_attn: dropout used in multi-head self-attention
        dropout_on_attn: dropout used on the output from the multihead self-attention
        dropout_on_fc1: dropout used in FFN layer for the first linear layer
        dropout_on_fc2: dropout used in FFN layer for the second linear layer
        segment_size: segment size for each segment
        context_config: (left_context_size, right_context_size) defines the surrounding
            context size for each segment
        max_memory_size: maximum memory size used for each segment
        scaled_init: whether to use scaled init for weight initialization in the attention layer
        std_scale: if std_scale is not None, weak attention suppression is
            turned on. For std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
        activation_fn: activation function used in FFN layer. [ReLU, GELU] supported
        tanh_on_mem: whether to use tanh on memory
        mini_batches: use mini-batch training
        negative_inf: the negative infinity value used in attention masking. Default is "-inf".
            For some situations, e.g. LM, it is better to use "-1e8" to avoid nan issues.
        summarization_method: method to generate the segment summarization embedding
        max_relative_position: max relative position for relative position embedding
        rpe_old_option: to be compatible with the previous model. The previous model
            was trained with attention += attention + rpe. The correct equation
            should be attention = attention + rpe.
        [TODO]: remove the rpe_old_option by the end of 2021 Q1.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
num_layers=1,
dropout_in_attn=0.0,
dropout_on_attn=0.0,
dropout_on_fc1=0.0,
dropout_on_fc2=0.0,
segment_size=128,
context_config=(0, 0),
max_memory_size=0,
scaled_init=True,
std_scale=None,
activation_fn="relu",
tanh_on_mem=False,
mini_batches=False,
negative_inf="-inf",
deep_init=True,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super().__init__(None)
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
        # We used to support a growing memory size. However, it causes cross-stream
        # batching failures, so now we require an exact max memory size.
if max_memory_size < 0:
raise ValueError("max_memory_size must be >= 0")
        # Only assign right_context. In decoding, the left context will be cached.
        # No need to let the online decoder re-assign the left context.
self.left_context, self.right_context = context_config
self.segment_size = segment_size
self.memory_dim = input_dim
self.max_memory_size = max_memory_size
self.mini_batches = mini_batches
if self.max_memory_size != 0:
self.use_mem = True
else:
self.use_mem = False
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
self.layers = torch.nn.ModuleList()
self.num_layers = num_layers
self.max_relative_position = max_relative_position
if self.max_relative_position > 0:
self.use_rpe = True
else:
self.use_rpe = False
for i in range(self.num_layers):
if deep_init:
layer_index = i
else:
layer_index = -1
self.layers.append(
NoSegAugmentedMemoryTransformer(
num_heads=num_heads,
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_in_attn=dropout_in_attn,
dropout_on_attn=dropout_on_attn,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
segment_size=segment_size,
std_scale=std_scale,
activation_fn=activation_fn,
tanh_on_mem=tanh_on_mem,
scaled_init=scaled_init,
use_mem=self.use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
summarization_method=summarization_method,
max_relative_position=max_relative_position,
rpe_old_option=rpe_old_option,
)
)
def set_mini_batches(self, mini_batches):
# handy function only used for unit test
self.mini_batches = mini_batches
for layer in self.layers:
layer.set_mini_batches(mini_batches)
def _get_relative_position(
self,
input: Tensor,
max_relative_position: int,
left_context_length: int,
past_length: int,
is_decoding: bool,
):
        # For training, we copy the right context to the start of the utterance.
        # The first dimension in distance corresponds to the query:
        # [right context, utterance, summary vector]
        # The second dimension in distance corresponds to the key:
        # [memory bank, right context, utterance]
        # For the summary vector in the query part, the distance to
        # all other positions is 2*max_position. For the memory bank in the key,
        # the distance to all other positions is 0.
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
# utterance
u_st = past_length * self.segment_size
u_ed = u_st + T
utterance_ranges = torch.arange(u_st, u_ed - self.right_context)
# left context. Only in minibatch or decoding
left_context_ranges = torch.arange(u_st - left_context_length, u_st)
# Right context block
# right context + utterance
right_context_blocks = []
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size + u_st
ed = st + self.right_context
assert ed < u_ed
temp = torch.arange(st, ed)
right_context_blocks.append(temp)
right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed))
right_context_ranges = torch.cat(right_context_blocks)
if self.use_mem:
# Memory bank
# The position for memory -n, .., -1
if is_decoding:
memory_size = min(past_length, self.max_memory_size)
else:
memory_size = num_segs + past_length - 1
memory_bank_ranges = torch.arange(
-max_relative_position - 1, -max_relative_position - 1 - memory_size, -1
)
# summary vector
            # The position for the summary vector is T + max_relative_position + 1.
# After the clamping, the relative position is max_relative_position
summary_pos_st = u_ed + max_relative_position + 1
summary_vector_ranges = torch.arange(
summary_pos_st, summary_pos_st + num_segs
)
key_ranges = torch.cat(
[
memory_bank_ranges,
right_context_ranges,
left_context_ranges,
utterance_ranges,
]
)
query_ranges = torch.cat(
[right_context_ranges, utterance_ranges, summary_vector_ranges]
)
else:
key_ranges = torch.cat(
[right_context_ranges, left_context_ranges, utterance_ranges]
)
query_ranges = torch.cat([right_context_ranges, utterance_ranges])
distance = key_ranges[None, :] - query_ranges[:, None]
distance_clamp = (
torch.clamp(distance, -max_relative_position, max_relative_position)
+ max_relative_position
)
distance_clamp = distance_clamp.to(input.device).long().detach()
return distance_clamp
def _get_attention_mask(self, input, past_length=0, left_context_cache=0):
# attention mask for each query contains three parts:
# 1. memory part
# 2. left_context + segment
# 3. right_context_block
        # so for each segment and its corresponding right context block,
        # the attention matrix is formed by 9 parts:
        # [0, m, 0, 0, right_context, 0, 0, seg, 0]
        # [before memory, memory, after memory, before right context, right_context,
        # after right context, before seg, seg, after seg]
        #
        # Query is formed as [right_context_blocks, utterance, summary]
        #
        # Note: putting m and right_context before the segment is convenient
        # for the padding_mask operation.
# Key lengths = m_length + right_context_block_length + lengths
utterance_length, batch_size, _ = input.shape
summary_length = math.ceil(utterance_length / self.segment_size)
num_segs = summary_length
rc_length = self.right_context * num_segs
rc = self.right_context
lc = self.left_context
        # When using mini-batches, a left context cache is available for the current
        # sequence.
lcc = left_context_cache
        # If max_memory_size is 0, we don't have memory or summary.
        # past_length is the memory carried over from the previous sequence.
if self.use_mem:
mem_length = num_segs - 1 + past_length
else:
mem_length = 0
rc_mask = []
query_mask = []
summary_mask = []
for j in range(0, num_segs):
ssize = min(self.segment_size, utterance_length - j * self.segment_size)
rc_size = rc
rc_mat = []
q_mat = []
s_mat = []
m_start = max(j + past_length - self.max_memory_size, 0)
            # If max_memory_size is 0, we don't use memory
if self.use_mem:
# part 0: before memory
rc_mat.append(input.new_zeros(rc_size, m_start))
q_mat.append(input.new_zeros(ssize, m_start))
s_mat.append(input.new_zeros(1, m_start))
# part 1: memory
col_1 = j + past_length - m_start
rc_mat.append(torch.ones(rc_size, col_1, device=input.device))
q_mat.append(torch.ones(ssize, col_1, device=input.device))
                # based on D22875746, disabling summary query attention
                # on memory is better for long-form utterances
s_mat.append(input.new_zeros(1, col_1))
# part 2: after memory
col_2 = mem_length - (j + past_length)
rc_mat.append(input.new_zeros(rc_size, col_2))
q_mat.append(input.new_zeros(ssize, col_2))
s_mat.append(input.new_zeros(1, col_2))
# part 3: before right context
rc_start = j * rc
rc_mat.append(input.new_zeros(rc_size, rc_start))
q_mat.append(input.new_zeros(ssize, rc_start))
s_mat.append(input.new_zeros(1, rc_start))
# part 4: right context
rc_end = rc_start + rc
col_4 = rc
rc_mat.append(torch.ones(rc_size, col_4, device=input.device))
q_mat.append(torch.ones(ssize, col_4, device=input.device))
s_mat.append(torch.ones(1, col_4, device=input.device))
# part 5: after right context
col_5 = rc_length - rc_end
rc_mat.append(input.new_zeros(rc_size, col_5))
q_mat.append(input.new_zeros(ssize, col_5))
s_mat.append(input.new_zeros(1, col_5))
# part 6: before query segment
seg_start = max(j * self.segment_size + lcc - lc, 0)
rc_mat.append(input.new_zeros(rc_size, seg_start))
q_mat.append(input.new_zeros(ssize, seg_start))
s_mat.append(input.new_zeros(1, seg_start))
# part 7: query segment
# note: right context is put in right context block
# here we only need to consider about left context
seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc)
col_7 = seg_end - seg_start
rc_mat.append(torch.ones(rc_size, col_7, device=input.device))
q_mat.append(torch.ones(ssize, col_7, device=input.device))
s_mat.append(torch.ones(1, col_7, device=input.device))
# part 8: after query segment
col_8 = utterance_length + lcc - seg_end
rc_mat.append(input.new_zeros(rc_size, col_8))
q_mat.append(input.new_zeros(ssize, col_8))
s_mat.append(input.new_zeros(1, col_8))
rc_mask.append(torch.cat(rc_mat, dim=1))
query_mask.append(torch.cat(q_mat, dim=1))
summary_mask.append(torch.cat(s_mat, dim=1))
# no memory, then we don't need summary either
if self.use_mem:
attention_mask = (
1
- torch.cat(
[
torch.cat(rc_mask, dim=0),
torch.cat(query_mask, dim=0),
torch.cat(summary_mask, dim=0),
],
dim=0,
)
).to(torch.bool)
else:
attention_mask = (
1
- torch.cat(
[torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0
)
).to(torch.bool)
return attention_mask
@torch.jit.export
def init_state(
self, batch_size: int, device: Optional[Device] = None
) -> List[Tensor]:
empty_memory = torch.zeros(
self.num_layers,
self.max_memory_size,
batch_size,
self.memory_dim,
device=device,
)
left_context_key = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
left_context_val = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
@torch.jit.export
def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]:
if len(states) == 0:
return []
batched_m = []
batched_lc_key = []
batched_lc_val = []
batched_past_length = []
for state in states:
if len(state) == 0:
continue
m, lc_key, lc_val, past_length = state
batched_m.append(m)
batched_lc_key.append(lc_key)
batched_lc_val.append(lc_val)
batched_past_length.append(past_length)
if (
(len(batched_m) == 0)
or (len(batched_lc_key) == 0)
or (len(batched_lc_val) == 0)
or (len(batched_past_length) == 0)
):
return [
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
]
batched_m = torch.cat(batched_m, dim=2)
batched_lc_key = torch.cat(batched_lc_key, dim=2)
batched_lc_val = torch.cat(batched_lc_val, dim=2)
batched_past_length = torch.cat(batched_past_length, dim=1)
return [batched_m, batched_lc_key, batched_lc_val, batched_past_length]
@torch.jit.export
def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
if len(state) == 0:
return []
m, lc_key, lc_val, past_length = state
indices = indices.to(device=m.device)
reord_m = torch.index_select(m, 2, indices)
reord_lc_key = torch.index_select(lc_key, 2, indices)
reord_lc_val = torch.index_select(lc_val, 2, indices)
reord_past_length = torch.index_select(past_length, 1, indices)
return [reord_m, reord_lc_key, reord_lc_val, reord_past_length]
@torch.jit.export
def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
m, lc_key, lc_val, past_length = state
m = m.index_fill(dim=2, index=indices, value=0.0)
lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0)
lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0)
past_length = past_length.index_fill(dim=1, index=indices, value=0)
return [m, lc_key, lc_val, past_length]
@torch.jit.export
def state_size(self) -> int:
return 4
@torch.jit.export
def batch_size_in_state(
self, state: Optional[List[Tensor]], sloppy: bool = True
) -> Optional[int]:
if state is None:
return None
return state[0].size(2)
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def _gen_right_context_padded_input(self, input):
# This function deals with input that is already
# padded with right context (e.g. minibatch training)
right_context_blocks = []
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size
ed = st + self.right_context
assert ed < T
temp = input[st:ed, :, :]
right_context_blocks.append(temp)
# last segment right context is already available
right_context_blocks.append(input[T - self.right_context :, :, :])
return torch.cat(right_context_blocks, dim=0)
def _gen_segs_right_context(self, input, lengths):
segments = []
T, B, D = input.size()
nT = T - self.right_context
# assume input is right context padded
num_segs = math.ceil(nT / self.segment_size)
        # pad zeros to the utterance to make sure each
        # segment has the same right context. For the last segment,
        # the right context is already included at the end of the padded input.
for i in range(0, num_segs - 1):
st = i * self.segment_size
ed = min(T, st + self.segment_size + self.right_context)
temp = input[st:ed, :, :]
rest_lengths = torch.clamp(
lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
)
segments.append((temp, lengths - rest_lengths + self.right_context))
lengths = rest_lengths
last_seg = input[st + self.segment_size :, :, :]
segments.append((last_seg, rest_lengths + self.right_context))
return segments
@torch.jit.unused
def forward(
self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
# Xutai: originally the second argument is lengths.
lengths = (~padding_masks).sum(dim=1).long()
# mini batch training.
if self.mini_batches:
return self.forward_mini_batches(input, lengths, state)
        # Regular full-sequence training. Note: we assume the right context is provided
        # in the input.
T, B, D = input.size()
right_context_blocks = self._gen_right_context_padded_input(input)
# generate the relative positional embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=0,
past_length=0,
is_decoding=False,
)
else:
rpe = None
input = input[: T - self.right_context, :, :]
attention_mask = self._get_attention_mask(input)
        # first layer uses each segment's mean as memory;
        # ignore the average of the last segment
if self.use_mem:
mems = self.gen_summary_queries(input)[:-1, :, :]
else:
mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
mems = mems.type_as(input)
output = input
all_outputs = []
for layer in self.layers:
output, mems, right_context_blocks, _, _ = layer(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=None,
left_context_key=None,
left_context_val=None,
rpe=rpe,
)
all_outputs.append(output)
return output, padding_masks, [], all_outputs
def forward_jit_mini_batch_init(
self,
seg: Tensor,
state: Optional[List[Tensor]] = None,
is_decoding: bool = False,
):
# Prepare state. In whole sequence training, state is ignored.
# For minibatch training, we need to prepare state
if state is None:
state = self.init_state(batch_size=seg.size(1), device=seg.device)
if seg.dtype == torch.half:
state = [state[0].half(), state[1].half(), state[2].half(), state[3]]
if self.use_mem:
            # note: the input is averaged only over seg, not over the right context
            # the first layer uses each segment's mean as memory; the last
            # segment's average is kept in the state
full_mems = self.gen_summary_queries(seg)
if is_decoding:
mems = full_mems[0:1, :, :]
state_mems = torch.cat([state[0][0], mems], dim=0)
else:
mems = full_mems[:-1, :, :]
state_mems = torch.cat([state[0][0], full_mems], dim=0)
else:
mems = state[0][0]
state_mems = mems
        # track the processed segment number (or memory number);
        # sequences in the same batch share the same past length
past_length = state[3][0][0].item()
past_left_context = min(past_length * self.segment_size, self.left_context)
past_length = min(self.max_memory_size, past_length)
return state, mems, state_mems, past_length, past_left_context
def state_update_before(
self, layer: int, state: List[Tensor], past_length: int, past_left_context: int
):
pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :]
lc_key = state[1][layer][self.left_context - past_left_context :, :, :]
lc_val = state[2][layer][self.left_context - past_left_context :, :, :]
return pre_mems, lc_key, lc_val
def state_update_after(
self,
layer: int,
state: List[Tensor],
mems: Tensor,
next_key: Tensor,
next_val: Tensor,
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
):
# mems is used for next layer
if layer < self.num_layers - 1:
state_mems = torch.cat([state[0][layer + 1], mems], dim=0)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
        # when mems are passed to the next sequence, we need the last memory. when mems
        # are used for the next layer, we can ignore the last memory
mems = mems[:-1, :, :]
        # note: the original length of state[1][i] and state[2][i] equals self.left_context
new_k = torch.cat([state[1][layer], next_key], dim=0)
new_v = torch.cat([state[2][layer], next_val], dim=0)
lc_key_list.append(new_k[-self.left_context :, :, :])
lc_val_list.append(new_v[-self.left_context :, :, :])
return mems_list, lc_key_list, lc_val_list, mems
def state_update_after_loop(
self,
state: List[Tensor],
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
update_length: int,
):
state[0] = torch.stack(mems_list, dim=0)
state[1] = torch.stack(lc_key_list, dim=0)
state[2] = torch.stack(lc_val_list, dim=0)
state[3] = state[3] + update_length
return state
@torch.jit.unused
def forward_mini_batches(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
T, B, D = input.size()
# input without right context
seg = input[: T - self.right_context, :, :]
# get right context blocks
right_context_blocks = self._gen_right_context_padded_input(input)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, False)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=False,
)
else:
rpe = None
        # get attention mask based on seg (not including right context) and the available
        # left context
attention_mask = self._get_attention_mask(seg, past_length, past_left_context)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
all_outputs = []
for layer in self.layers:
# In order to make cross stream batching work, mem, left context key
# and left context value in the state should always be the same shape.
# We use the past length to track the processed segment number. In this
# way, we take out the essential memory, left context key and left
            # context val from the state. After finishing the forward pass for the
            # current segment, we add the new memory, left context key and left context
            # value into the state and trim out the oldest part to keep the shape consistent.
pre_mems, lc_key, lc_val = self.state_update_before(
i, state, past_length, past_left_context
)
output, mems, right_context_blocks, next_key, next_val = layer.forward(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
all_outputs.append(output)
mems_list, lc_key_list, lc_val_list, mems = self.state_update_after(
layer=i,
state=state,
mems=mems,
next_key=next_key,
next_val=next_val,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
update_length = math.ceil((T - self.right_context) / self.segment_size)
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=update_length,
)
return output, lengths, state, all_outputs
def forward_jit_test(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
        This simulates the sequence encoder forward_jit. It is for unit test purposes
        only and is not used in training or decoding. Note: extra_right_context is set
        in the model. In the unit test, input = [utterance, right_context], lengths =
        [utterance_length].
args:
input: input utterance
lengths: utterance input length
state: None here. input is whole utterance
"""
# [TODO] sequence_to_segment has bug in lengths.
seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths)
seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = []
state: Optional[List[Tensor]] = None
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
seg_enc_tokens, seg_enc_lengths, state = self.forward_jit(
input=seg_src_tokens, lengths=seg_src_lengths, state=state
)
seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths))
enc_tokens, enc_lengths = segments_to_sequence(
segments=seg_enc_tokens_lengths, time_axis=0
)
state = [] # returns trivial state
return enc_tokens, enc_lengths, state
@torch.jit.export
def forward_jit(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
Forward helper for online decoding.
args:
            input: [seg, right_context]. We assume that in online decoding we
                always pad the right context to the preset right context size.
                For the last segment, the segment size may be shorter, but the right
                context size is the same as for other segments.
            lengths: utterance input length, i.e. the utterance segment length plus
                the right context size
state: [memory, left_context_key, left_context_val]. To improve throughput,
in addition to memory, we also cache key and value for left_context in
multihead self-attention
"""
# In online decoding, input = [segment, right_context]
# Lengths = [segment_length, right_context_length]
        # so we need to strip the right context in the output
T, B, D = input.size()
rc_str = T - self.right_context
rc_end = T
right_context_blocks = input[rc_str:rc_end, :, :]
seg = input[:rc_str, :, :]
lengths = torch.clamp(lengths - self.right_context, min=0)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, True)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=True,
)
else:
rpe = None
# memory for first layer.
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
for layer in self.layers:
# In order to make cross stream batching work, mem, left context key
# and left context value in the state should always be the same shape.
# We use the past length to track the processed segment number. In this
# way, we take out the essential memory, left context key and left
            # context val from the state. After finishing the forward pass for the
            # current segment, we add the new memory, left context key and left context
            # value into the state and trim out the oldest part to keep the shape consistent.
true_mems, lc_key, lc_val = self.state_update_before(
layer=i,
state=state,
past_length=past_length,
past_left_context=past_left_context,
)
output, mems, right_context_blocks, next_key, next_val = layer.forward_jit(
input=output,
lengths=lengths,
mems=true_mems,
right_context_blocks=right_context_blocks,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
# mems is used for next layer
mems_list, lc_key_list, lc_val_list, _ = self.state_update_after(
layer=i,
state=state,
mems_list=mems_list,
mems=mems,
next_key=next_key,
next_val=next_val,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=1,
)
return output, lengths, state
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
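# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the heart of
# _get_relative_position() above is a pairwise distance between key positions
# and query positions, clamped to [-max_relative_position, max_relative_position]
# and shifted so it can index an embedding table of size
# 2 * max_relative_position + 1. The toy ranges below are made up.
# ---------------------------------------------------------------------------
def _relative_distance_sketch():
    import torch

    max_relative_position = 4
    query_ranges = torch.arange(0, 6)   # e.g. utterance positions
    key_ranges = torch.arange(-2, 6)    # e.g. left context + utterance positions
    distance = key_ranges[None, :] - query_ranges[:, None]
    distance_clamp = (
        torch.clamp(distance, -max_relative_position, max_relative_position)
        + max_relative_position
    )
    assert int(distance_clamp.min()) >= 0
    assert int(distance_clamp.max()) <= 2 * max_relative_position
    return distance_clamp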
# ------------------------------------------------------------------------------
# Emformer encoder for seq2seq model
# This is a wrapper over the original emformer
# ------------------------------------------------------------------------------
def emformer_encoder(klass):
class SpeechEncoder(klass):
def __init__(self, args):
super().__init__(args)
stride = SpeechEncoder.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
def forward(self, src_tokens, src_lengths):
encoder_out = super().forward(src_tokens, src_lengths)
(output, encoder_padding_masks, [], _) = encoder_out["encoder_out"][0]
            # This is because in the original implementation
            # the output didn't consider the last segment as right context.
encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]
return {
"encoder_out": [output],
"encoder_padding_mask": [encoder_padding_masks],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
SpeechEncoder.__name__ = klass.__name__
return SpeechEncoder
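# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the class-decorator pattern
# used by emformer_encoder(). The dummy base class below stands in for a real
# fairseq speech encoder; the point is that the wrapper subclasses whatever it
# is given and restores the original __name__, so registries and logging are
# unaffected by the wrapping.
# ---------------------------------------------------------------------------
def _encoder_wrapper_pattern_sketch():
    class _DummyEncoder:
        def __init__(self, args):
            self.args = args

    def _wrap(klass):
        class Wrapped(klass):
            def __init__(self, args):
                super().__init__(args)
                self.wrapped = True

        Wrapped.__name__ = klass.__name__
        return Wrapped

    enc = _wrap(_DummyEncoder)(args=None)
    assert enc.wrapped and type(enc).__name__ == "_DummyEncoder"
    return enc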
|
COCO-LM/fairseq/fairseq/models/speech_to_text/modules/emformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/speech_to_text/modules/emformer.py",
"repo_id": "COCO-LM",
"token_count": 33288
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class BeamableMM(nn.Module):
"""This module provides an optimized MM for beam decoding with attention.
    It leverages the fact that the source-side of the input is replicated beam
    times and the target-side of the input is of width one. This layer speeds up
    inference by replacing the inputs {(bsz x 1 x nhu), (bsz x nhu x sz2)}
    with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x nhu x sz2)}.
"""
def __init__(self, beam_size=None):
super(BeamableMM, self).__init__()
self.beam_size = beam_size
def forward(self, input1, input2):
if (
            not self.training  # test mode
            and self.beam_size is not None  # beam size is set
            and input1.dim() == 3  # only support batched input
            and input1.size(1) == 1  # single time step update
):
bsz, beam = input1.size(0), self.beam_size
# bsz x 1 x nhu --> bsz/beam x beam x nhu
input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
            # bsz x nhu x sz2 --> bsz/beam x nhu x sz2
input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
# use non batched operation if bsz = beam
if input1.size(0) == 1:
output = torch.mm(input1[0, :, :], input2[0, :, :])
else:
output = input1.bmm(input2)
return output.view(bsz, 1, -1)
else:
return input1.bmm(input2)
def set_beam_size(self, beam_size):
self.beam_size = beam_size
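# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). The optimized path only
# fires in eval mode, with a configured beam size, a single target time step,
# and a source side that is replicated beam times along the batch dimension;
# the toy shapes below are chosen to satisfy those conditions and to check the
# result against a plain bmm.
# ---------------------------------------------------------------------------
def _beamable_mm_sketch():
    import torch

    bsz_per_beam, beam, nhu, sz2 = 3, 2, 4, 5
    mm = BeamableMM(beam_size=beam).eval()
    input1 = torch.randn(bsz_per_beam * beam, 1, nhu)
    # source side: identical across the beam, as beam search produces it
    input2 = torch.randn(bsz_per_beam, nhu, sz2).repeat_interleave(beam, dim=0)
    fast = mm(input1, input2)
    slow = input1.bmm(input2)
    assert fast.shape == (bsz_per_beam * beam, 1, sz2)
    assert torch.allclose(fast, slow, atol=1e-6)
    return fast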
|
COCO-LM/fairseq/fairseq/modules/beamable_mm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/beamable_mm.py",
"repo_id": "COCO-LM",
"token_count": 786
}
| 189 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name="lightconv_layer",
ext_modules=[
CUDAExtension(
"lightconv_cuda",
[
"lightconv_cuda.cpp",
"lightconv_cuda_kernel.cu",
],
),
],
cmdclass={"build_ext": BuildExtension},
)
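# ---------------------------------------------------------------------------
# Note (not part of the original file): building this extension requires a
# CUDA toolkit compatible with the installed PyTorch. A typical setuptools
# invocation from this directory would be one of
#
#     python setup.py build_ext --inplace
#     python setup.py install
#
# after which `import lightconv_cuda` becomes available to the Python wrappers
# that call into it.
# ---------------------------------------------------------------------------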
|
COCO-LM/fairseq/fairseq/modules/lightconv_layer/setup.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/lightconv_layer/setup.py",
"repo_id": "COCO-LM",
"token_count": 246
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention, SelfMultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
export: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
init_fn: Callable = None,
        # newly added
encoder_normalize_before: bool = False,
) -> None:
super().__init__()
if init_fn is not None:
init_fn()
# Initialize parameters
self.embedding_dim = embedding_dim
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.activation_dropout_module = FairseqDropout(
activation_dropout, module_name=self.__class__.__name__
)
        # newly added
self.normalize_before = encoder_normalize_before
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = self.build_self_attention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = self.build_fc1(
self.embedding_dim,
ffn_embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
self.fc2 = self.build_fc2(
ffn_embedding_dim,
self.embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
):
return SelfMultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
)
def forward(
self,
x: torch.Tensor,
attn_bias: Optional[torch.Tensor] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
        # newly added
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x, attn = self.self_attn(
query=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
attn_bias=attn_bias,
)
# sometimes padding tokens can have nowhere to attend, just set their values to zero
x[x != x] = 0
x = self.dropout_module(x)
x = residual + x
# change
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
# x = self.self_attn_layer_norm(x)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
# x = self.final_layer_norm(x)
return x, attn
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
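# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the truth table implemented by
# maybe_layer_norm(). With encoder_normalize_before=True the norm fires on the
# before=True call (pre-LN); with False it fires on the after=True call
# (post-LN). The helper below replays that XOR without any tensors.
# ---------------------------------------------------------------------------
def _pre_post_norm_sketch():
    def applies_norm(normalize_before: bool, before: bool = False, after: bool = False) -> bool:
        # same decision as maybe_layer_norm: True means layer_norm(x) is returned
        assert before ^ after
        return after ^ normalize_before

    # pre-LN configuration: normalize before attention/FFN, not after the residual
    assert applies_norm(True, before=True) and not applies_norm(True, after=True)
    # post-LN configuration: normalize after the residual only
    assert not applies_norm(False, before=True) and applies_norm(False, after=True)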
|
COCO-LM/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py",
"repo_id": "COCO-LM",
"token_count": 2314
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.dataclass.utils import gen_parser_from_dataclass
class FairseqOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
def step(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.step(closure, scale=scale, groups=groups)
else:
self.optimizer.step(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.step(closure, groups=groups)
else:
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def average_params(self):
pass
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
class LegacyFairseqOptimizer(FairseqOptimizer):
def __init__(self, args):
self.args = args
|
COCO-LM/fairseq/fairseq/optim/fairseq_optimizer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/fairseq_optimizer.py",
"repo_id": "COCO-LM",
"token_count": 2596
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim
from . import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("sgd")
class SGD(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
help='momentum factor')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"momentum": self.args.momentum,
"weight_decay": self.args.weight_decay,
}
@property
def supports_flat_params(self):
return True
|
COCO-LM/fairseq/fairseq/optim/sgd.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/sgd.py",
"repo_id": "COCO-LM",
"token_count": 595
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
from collections import OrderedDict
import numpy as np
from fairseq import tokenizer, utils
from fairseq.data import ConcatDataset, Dictionary, TokenBlockDataset, data_utils
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("cross_lingual_lm")
class CrossLingualLMTask(LegacyFairseqTask):
"""
Task for training cross-lingual language models.
For more details look at: https://arxiv.org/pdf/1901.07291.pdf
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments" " per sample",
)
parser.add_argument(
"--monolingual-langs",
default="en",
type=str,
help="comma separated list of languages for which we"
" want to train XLM on",
)
parser.add_argument(
"--shuffle",
action="store_true",
help="shuffle each monolingual dataset while" " training",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.distributed_world_size = args.distributed_world_size
self.langs2id = self._lang_to_id(args.monolingual_langs)
def _lang_to_id(self, languages: str):
"""
Build a map from languages to ids. These ids are used as segment labels
for cross-lingual LM training.
"""
lang2id = {}
langs = [l.strip() for l in languages.split(",")]
for id, lang in enumerate(langs):
lang2id[lang] = id
return lang2id
@classmethod
def load_dictionary(cls, filename):
return MaskedLMDictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
d = MaskedLMDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
dictionary = MaskedLMDictionary.load(os.path.join(args.data, "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def _load_single_lang_dataset(self, split, epoch):
loaded_datasets = []
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
path = os.path.join(data_path, split_k)
ds = data_utils.load_indexed_dataset(
path, self.dictionary, self.args.dataset_impl
)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
# Since we append each block with the classification_token,
# we need to effectively create blocks of length
# tokens_per_sample-1
loaded_datasets.append(
TokenBlockDataset(
ds,
ds.sizes,
self.args.tokens_per_sample - 1,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
)
)
logger.info(
"{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1]))
)
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
return dataset, sizes
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
dataset_map = OrderedDict()
for lang in self.langs2id.keys():
# Datasets are expected to be in "split.lang" format (Eg: train.en)
language_split = "{}.{}".format(split, lang)
block_dataset, sizes = self._load_single_lang_dataset(
split=language_split, epoch=epoch
)
dataset_map[lang] = MaskedLMDataset(
dataset=block_dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.eos(),
sep_token_idx=self.dictionary.eos(),
shuffle=getattr(self.args, "shuffle", False),
has_pairs=False,
segment_id=self.langs2id[lang],
seed=self.seed,
)
self.datasets[split] = MultiCorpusSampledDataset(dataset_map)
logger.info(
"{} {} {} examples".format(
utils.split_paths(self.args.data)[epoch - 1],
split,
len(self.datasets[split]),
)
)
|
COCO-LM/fairseq/fairseq/tasks/cross_lingual_lm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/cross_lingual_lm.py",
"repo_id": "COCO-LM",
"token_count": 3113
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from . import register_task
@dataclass
class TranslationFromPretrainedXLMConfig(TranslationConfig):
pass
@register_task(
"translation_from_pretrained_xlm", dataclass=TranslationFromPretrainedXLMConfig
)
class TranslationFromPretrainedXLMTask(TranslationTask):
"""
    Same as TranslationTask except it uses the MaskedLMDictionary class so that
    we can load data that was binarized with the MaskedLMDictionary class.
This task should be used for the entire training pipeline when we want to
train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
training NMT with the pretrained XLM checkpoint, and subsequent evaluation
of that trained model.
"""
@classmethod
def load_dictionary(cls, filename):
"""Load the masked LM dictionary from the filename
Args:
filename (str): the filename
"""
return MaskedLMDictionary.load(filename)
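# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original file): because the task is
# registered as "translation_from_pretrained_xlm", it is selected through the
# standard fairseq CLI, e.g.
#
#     fairseq-preprocess --task translation_from_pretrained_xlm ...
#     fairseq-train <data-bin> --task translation_from_pretrained_xlm ...
#
# so that both binarization and training use MaskedLMDictionary; the remaining
# flags are the usual TranslationTask options and are omitted here.
# ---------------------------------------------------------------------------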
|
COCO-LM/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py",
"repo_id": "COCO-LM",
"token_count": 393
}
| 195 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
from multiprocessing.pool import ThreadPool
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(cfg: FairseqConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg:
# make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
ckp_copy_thread = ThreadPool(processes=1)
else:
ckp_copy_thread = None
# Print args
logger.info(cfg)
if cfg.checkpoint.write_checkpoints_asynchronously:
assert False
try:
import iopath # noqa: F401
except ImportError:
logging.exception(
"Asynchronous checkpoint writing is specified but iopath is "
"not installed: `pip install iopath`"
)
return
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
if cfg.distributed_training.ddp_backend == "fully_sharded":
with fsdp_enable_wrap(cfg.distributed_training):
model = fsdp_wrap(task.build_model(cfg.model))
else:
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("criterion: {}".format(criterion.__class__.__name__))
logger.info(
"num. model params: {:,} (num. trained: {:,})".format(
sum(getattr(p, "_orig_size", p).numel() for p in model.parameters()),
sum(getattr(p, "_orig_size", p).numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per device = {} and max sentences per device = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
if cfg.common.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("load_checkpoint") # wait for all workers
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while epoch_itr.next_epoch_idx <= max_epoch:
if lr <= cfg.optimization.stop_min_lr:
logger.info(
f"stopping training because current learning rate ({lr}) is smaller "
"than or equal to minimum learning rate "
f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
)
break
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr, ckp_copy_thread)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
if ckp_copy_thread is not None:
ckp_copy_thread.close()
ckp_copy_thread.join()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
# ioPath implementation to wait for all asynchronous file writes to complete.
if cfg.checkpoint.write_checkpoints_asynchronously:
logger.info(
"ioPath PathManager waiting for all asynchronous checkpoint "
"writes to finish."
)
PathManager.async_close()
logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, ckp_copy_thread
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
azureml_logging=(
cfg.common.azureml_logging
if distributed_utils.is_master(cfg.distributed_training)
else False
),
)
progress.update_config(_flatten_config(cfg))
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch, ckp_copy_thread
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
# remove any legacy Namespaces and replace with a single "args"
namespace = None
for k, v in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if namespace is not None:
config["args"] = vars(namespace)
return config
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
ckp_copy_thread,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
training_time_hours = trainer.cumulative_training_time() / (60 * 60)
if (
cfg.optimization.stop_time_hours > 0
and training_time_hours > cfg.optimization.stop_time_hours
):
should_stop = True
logger.info(
f"Stopping training due to "
f"cumulative_training_time: {training_time_hours} > "
f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0 and not cfg.checkpoint.no_epoch_checkpoints)
or should_stop
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0 and not cfg.checkpoint.no_epoch_checkpoints)
or should_stop
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(cfg, valid_losses[0])
# Save checkpoint
if do_save or should_stop:
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0], ckp_copy_thread
)
return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(
shuffle=False, set_dataset_epoch=False # use a fixed valid set
)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
logging_outputs = []
for i, sample in enumerate(progress):
if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:
break
inner_logging_outputs = trainer.valid_step(sample)
logging_outputs.extend(inner_logging_outputs)
task.reduce_metrics(logging_outputs, trainer.get_criterion())
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
cfg = convert_namespace_to_omegaconf(args)
if cfg.common.use_plasma_view:
server = PlasmaStore(path=cfg.common.plasma_path)
logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}")
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
# if cfg.common.use_plasma_view:
# server.server.kill()
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/fairseq_cli/train.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq_cli/train.py",
"repo_id": "COCO-LM",
"token_count": 7866
}
| 196 |
try:
import torch
import fused_xentropy_cuda
from .softmax_xentropy import SoftmaxCrossEntropyLoss
del torch
del fused_xentropy_cuda
del softmax_xentropy
except ImportError as err:
print("cannot import kernels, please install the package")
|
COCO-LM/fairseq/fused_ops/fused_ops/xentropy/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/fused_ops/xentropy/__init__.py",
"repo_id": "COCO-LM",
"token_count": 95
}
| 197 |
#!/bin/bash
if [ $# -ne 1 ]; then
echo "usage: $0 GENERATE_PY_OUTPUT"
exit 1
fi
GEN=$1
SYS=$GEN.sys
REF=$GEN.ref
if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then
echo "not done generating"
exit
fi
grep ^H $GEN | awk -F '\t' '{print $NF}' | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS
grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF
fairseq-score --sys $SYS --ref $REF
|
COCO-LM/fairseq/scripts/compound_split_bleu.sh/0
|
{
"file_path": "COCO-LM/fairseq/scripts/compound_split_bleu.sh",
"repo_id": "COCO-LM",
"token_count": 223
}
| 198 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from torch.utils.checkpoint import checkpoint
class Model(nn.Module):
def __init__(
self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False, **kwargs
):
super().__init__()
torch.manual_seed(0)
self.use_pytorch_checkpoint = use_pytorch_checkpoint
self.ffn = nn.Sequential(
nn.Linear(32, 128),
# add a Dropout layer to test RNG save/restore
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
if use_fairseq_checkpoint:
self.ffn = checkpoint_wrapper(self.ffn, **kwargs)
self.out = nn.Linear(32, 1)
def forward(self, x):
if self.use_pytorch_checkpoint:
x = checkpoint(self.ffn, x)
else:
x = self.ffn(x)
return self.out(x)
class TestActivationCheckpointing(unittest.TestCase):
def _test_checkpoint_wrapper(self, device, log_memory_usage=False):
def get_loss_and_gnorm(model):
torch.manual_seed(1)
input = torch.rand(2, 16, 32).requires_grad_(True).to(device)
model.zero_grad()
loss = model(input).sum()
loss.backward()
gnorm = torch.norm(
torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()])
)
return {"loss": loss, "gnorm": gnorm}
model = Model().to(device)
no_cpt = get_loss_and_gnorm(model)
model = Model(use_pytorch_checkpoint=True).to(device)
pyt_cpt = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"])
model = Model(use_fairseq_checkpoint=True).to(device)
fairseq_cpt = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt["gnorm"])
model = Model(use_fairseq_checkpoint=True, offload_to_cpu=True).to(device)
fairseq_cpt_offload = get_loss_and_gnorm(model)
torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt_offload["loss"])
torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt_offload["gnorm"])
def test_checkpoint_wrapper_cpu(self):
self._test_checkpoint_wrapper(device=torch.device("cpu"))
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_checkpoint_wrapper_cuda(self):
self._test_checkpoint_wrapper(device=torch.device("cuda"))
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_activation_checkpointing.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_activation_checkpointing.py",
"repo_id": "COCO-LM",
"token_count": 1316
}
| 199 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest import mock
class TestIOPath(unittest.TestCase):
def test_no_iopath(self):
from .test_reproducibility import TestReproducibility
with mock.patch.dict("sys.modules", {"iopath": None}):
# reuse reproducibility tests, which are e2e tests that should cover
# most checkpoint related functionality
TestReproducibility._test_reproducibility(self, "test_reproducibility")
def test_no_supports_rename(self):
from .test_reproducibility import TestReproducibility
with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn:
mock_fn.return_value = False
TestReproducibility._test_reproducibility(self, "test_reproducibility")
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_iopath.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_iopath.py",
"repo_id": "COCO-LM",
"token_count": 365
}
| 200 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class TestSparseMultiheadAttention(unittest.TestCase):
def test_sparse_multihead_attention(self):
attn_weights = torch.randn(1, 8, 8)
bidirectional_sparse_mask = torch.tensor(
[
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
bidirectional_attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=True
)
bidirectional_attention_sparse_mask = (
bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
)
torch.all(
torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)
)
sparse_mask = torch.tensor(
[
[
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
float("-inf"),
float("-inf"),
],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
0,
float("-inf"),
],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=False
)
attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
torch.all(torch.eq(attention_sparse_mask, sparse_mask))
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_sparse_multihead_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_sparse_multihead_attention.py",
"repo_id": "COCO-LM",
"token_count": 2337
}
| 201 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Set pretrained model name, from ['cocolm-base', 'cocolm-large']
MODEL_NAME=$1
# Path to SQuAD dataset 'path/to/squad2_data'
DATASET_PATH=$2
# Output path for results and fine-tuned model
OUT_PATH=$3
mkdir -p $DATASET_PATH
# Train dataset
export TRAIN_FILE=$DATASET_PATH/train-v2.0.json
if [ ! -f $TRAIN_FILE ]
then
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $TRAIN_FILE
fi
# Dev dataset
export DEV_FILE=$DATASET_PATH/dev-v2.0.json
if [ ! -f $DEV_FILE ]
then
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $DEV_FILE
fi
# Set max sequence length
export MAX_LEN=384
# Set path to cache train & dev features (tokenized, only use for this tokenizer!)
export TRAIN_CACHE=${TRAIN_FILE}_cocolm_cased.384doc_new.cache
export DEV_CACHE=${DEV_FILE}_cocolm_cased.384doc_new.cache
# Setting the hyperparameters for the run.
export BSZ=$4
export LR=$5
export EPOCH=$6
export WM=$7
export SEED=$8
# Set path to save the finetuned model and result score
export OUTPUT_PATH=$OUT_PATH/$BSZ-$LR-$EPOCH-$WM-$SEED
mkdir -p $OUTPUT_PATH
touch $OUTPUT_PATH/train.log
python run_squad.py \
--model_type cocolm --model_name_or_path $MODEL_NAME \
--config_name $MODEL_NAME \
--train_file $TRAIN_FILE --predict_file $DEV_FILE \
--cached_train_file $TRAIN_CACHE --cached_dev_file $DEV_CACHE \
--do_train --evaluate_during_training --logging_steps 1000 \
--per_gpu_train_batch_size $BSZ --learning_rate $LR --num_train_epochs $EPOCH --gradient_accumulation_steps 1 \
--max_seq_length $MAX_LEN --doc_stride 128 --output_dir $OUTPUT_PATH \
--version_2_with_negative --seed 1 --max_grad_norm 0 \
--weight_decay 0.01 --warmup_ratio $WM \
--adam_epsilon 1e-6 --adam_betas "0.9,0.98" \
--seed $SEED \
--overwrite_output_dir \
--metric_for_choose_best_checkpoint "best_f1" |& tee $OUTPUT_PATH/train.log
# Add the following for fp16 training
# --fp16_init_loss_scale 128.0 --fp16 --fp16_opt_level O2
|
COCO-LM/huggingface/run_squad.sh/0
|
{
"file_path": "COCO-LM/huggingface/run_squad.sh",
"repo_id": "COCO-LM",
"token_count": 858
}
| 202 |
# CSWin-Transformer, CVPR 2022
[](https://paperswithcode.com/sota/semantic-segmentation-on-ade20k?p=cswin-transformer-a-general-vision)
[](https://paperswithcode.com/sota/semantic-segmentation-on-ade20k-val?p=cswin-transformer-a-general-vision)
This repo is the official implementation of ["CSWin Transformer: A General Vision Transformer Backbone with Cross-Shaped Windows"](https://arxiv.org/pdf/2107.00652.pdf).
## Introduction
**CSWin Transformer** (the name `CSWin` stands for **C**ross-**S**haped **Win**dow), introduced in [arxiv](https://arxiv.org/abs/2107.00652), is a new general-purpose backbone for computer vision. It is a hierarchical Transformer that replaces traditional full attention with our newly proposed cross-shaped window self-attention. This mechanism computes self-attention within horizontal and vertical stripes in parallel, which together form a cross-shaped window; each stripe is obtained by splitting the input feature into stripes of equal width. With CSWin, we can realize global attention at a limited computation cost.
CSWin Transformer achieves strong performance on ImageNet classification (87.5 on val with only 97G flops) and ADE20K semantic segmentation (`55.7 mIoU` on val), surpassing previous models by a large margin.
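For intuition, here is a minimal, self-contained sketch of the stripe splitting behind cross-shaped window attention. This is an illustrative toy, not the implementation in this repo: the real blocks split the channels/heads between the two branches, use multi-head query/key/value projections, and add the locally-enhanced positional encoding (LePE), all of which are omitted here.
```
import torch

def stripe_attention(x, H, W, sw, horizontal=True):
    """Plain single-head attention inside horizontal or vertical stripes of width sw."""
    B, N, C = x.shape          # tokens on an H x W grid, N == H * W
    x = x.view(B, H, W, C)
    if horizontal:
        # rows grouped into stripes of height sw -> windows of size (sw, W)
        win = x.view(B, H // sw, sw, W, C).reshape(B * (H // sw), sw * W, C)
    else:
        # columns grouped into stripes of width sw -> windows of size (H, sw)
        win = x.permute(0, 2, 1, 3).reshape(B * (W // sw), sw * H, C)
    attn = torch.softmax(win @ win.transpose(-2, -1) / C ** 0.5, dim=-1)
    out = attn @ win
    return out.reshape(B, N, C)  # token order differs between branches; fine for illustration

B, H, W, C, sw = 2, 8, 8, 32, 2
tokens = torch.randn(B, H * W, C)
# in CSWin half of the heads attend within horizontal stripes and half within vertical ones,
# so every token can reach the whole cross-shaped window formed by its row and column stripes
horizontal_branch = stripe_attention(tokens, H, W, sw, horizontal=True)
vertical_branch = stripe_attention(tokens, H, W, sw, horizontal=False)
cross_shaped = torch.cat([horizontal_branch, vertical_branch], dim=-1)  # (B, H*W, 2*C) in this toy
```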

## Main Results on ImageNet
| model | pretrain | resolution | acc@1 | #params | FLOPs | 22K model | 1K model |
|:---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| CSWin-T | ImageNet-1K | 224x224 | 82.8 | 23M | 4.3G | - | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_tiny_224.pth) |
| CSWin-S | ImageNet-1K | 224x224 | 83.6 | 35M | 6.9G | - | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_small_224.pth) |
| CSWin-B | ImageNet-1K | 224x224 | 84.2 | 78M | 15.0G | - | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_base_224.pth) |
| CSWin-B | ImageNet-1K | 384x384 | 85.5 | 78M | 47.0G | - | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_base_384.pth) |
| CSWin-L | ImageNet-22k | 224x224 | 86.5 | 173M | 31.5G | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_large_22k_224.pth) | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_large_224.pth) |
| CSWin-L | ImageNet-22k | 384x384 | 87.5 | 173M | 96.8G | - | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.1.0/cswin_large_384.pth) |
## Main Results on Downstream Tasks
**COCO Object Detection**
| backbone | Method | pretrain | lr Schd | box mAP | mask mAP | #params | FLOPS |
|:---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| CSwin-T | Mask R-CNN | ImageNet-1K | 3x | 49.0 | 43.6 | 42M | 279G |
| CSwin-S | Mask R-CNN | ImageNet-1K | 3x | 50.0 | 44.5 | 54M | 342G |
| CSwin-B | Mask R-CNN | ImageNet-1K | 3x | 50.8 | 44.9 | 97M | 526G |
| CSwin-T | Cascade Mask R-CNN | ImageNet-1K | 3x | 52.5 | 45.3 | 80M | 757G |
| CSwin-S | Cascade Mask R-CNN | ImageNet-1K | 3x | 53.7 | 46.4 | 92M | 820G |
| CSwin-B | Cascade Mask R-CNN | ImageNet-1K | 3x | 53.9 | 46.4 | 135M | 1004G |
**ADE20K Semantic Segmentation (val)**
| Backbone | Method | pretrain | Crop Size | Lr Schd | mIoU | mIoU (ms+flip) | #params | FLOPs |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| CSwin-T | Semantic FPN | ImageNet-1K | 512x512 | 80K | 48.2 | - | 26M | 202G |
| CSwin-S | Semantic FPN | ImageNet-1K | 512x512 | 80K | 49.2 | - | 39M | 271G |
| CSwin-B | Semantic FPN | ImageNet-1K | 512x512 | 80K | 49.9 | - | 81M | 464G |
| CSwin-T | UPerNet | ImageNet-1K | 512x512 | 160K | 49.3 | 50.7 | 60M | 959G |
| CSwin-S | UperNet | ImageNet-1K | 512x512 | 160K | 50.4 | 51.5 | 65M | 1027G |
| CSwin-B | UperNet | ImageNet-1K | 512x512 | 160K | 51.1 | 52.2 | 109M | 1222G |
| CSwin-B | UPerNet | ImageNet-22K | 640x640 | 160K | 51.8 | 52.6 | 109M | 1941G |
| CSwin-L | UperNet | ImageNet-22K | 640x640 | 160K | 53.4 | 55.7 | 208M | 2745G |
Pretrained models and code can be found at [`segmentation`](segmentation)
## Requirements
Requirements: timm==0.3.4, pytorch>=1.4, opencv, etc. To install them, run:
```
bash install_req.sh
```
Apex is used for mixed-precision training during finetuning. To install apex, run:
```
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
```
Data preparation: ImageNet with the following folder structure; you can extract ImageNet using this [script](https://gist.github.com/BIGBALLON/8a71d225eff18d88e469e6ea9b39cef4).
```
│imagenet/
├──train/
│ ├── n01440764
│ │ ├── n01440764_10026.JPEG
│ │ ├── n01440764_10027.JPEG
│ │ ├── ......
│ ├── ......
├──val/
│ ├── n01440764
│ │ ├── ILSVRC2012_val_00000293.JPEG
│ │ ├── ILSVRC2012_val_00002138.JPEG
│ │ ├── ......
│ ├── ......
```
## Train
Train the three lite variants: CSWin-Tiny, CSWin-Small and CSWin-Base:
```
bash train.sh 8 --data <data path> --model CSWin_64_12211_tiny_224 -b 256 --lr 2e-3 --weight-decay .05 --amp --img-size 224 --warmup-epochs 20 --model-ema-decay 0.99984 --drop-path 0.2
```
```
bash train.sh 8 --data <data path> --model CSWin_64_24322_small_224 -b 256 --lr 2e-3 --weight-decay .05 --amp --img-size 224 --warmup-epochs 20 --model-ema-decay 0.99984 --drop-path 0.4
```
```
bash train.sh 8 --data <data path> --model CSWin_96_24322_base_224 -b 128 --lr 1e-3 --weight-decay .1 --amp --img-size 224 --warmup-epochs 20 --model-ema-decay 0.99992 --drop-path 0.5
```
If you want to train our CSWin on images with 384x384 resolution, please use '--img-size 384'.
If the GPU memory is not enough, please use '-b 128 --lr 1e-3 --model-ema-decay 0.99992' or use [checkpoint](https://pytorch.org/docs/stable/checkpoint.html) '--use-chk'.
## Finetune
Finetune CSWin-Base with 384x384 resolution:
```
bash finetune.sh 8 --data <data path> --model CSWin_96_24322_base_384 -b 32 --lr 5e-6 --min-lr 5e-7 --weight-decay 1e-8 --amp --img-size 384 --warmup-epochs 0 --model-ema-decay 0.9998 --finetune <pretrained 224 model> --epochs 20 --mixup 0.1 --cooldown-epochs 10 --drop-path 0.7 --ema-finetune --lr-scale 1 --cutmix 0.1
```
Finetune ImageNet-22K pretrained CSWin-Large with 224x224 resolution:
```
bash finetune.sh 8 --data <data path> --model CSWin_144_24322_large_224 -b 64 --lr 2.5e-4 --min-lr 5e-7 --weight-decay 1e-8 --amp --img-size 224 --warmup-epochs 0 --model-ema-decay 0.9996 --finetune <22k-pretrained model> --epochs 30 --mixup 0.01 --cooldown-epochs 10 --interpolation bicubic --lr-scale 0.05 --drop-path 0.2 --cutmix 0.3 --use-chk --fine-22k --ema-finetune
```
If the GPU memory is not enough, please use [checkpoint](https://pytorch.org/docs/stable/checkpoint.html) '--use-chk'.
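As a side note, `--use-chk` turns on activation checkpointing. The following is a generic sketch of the idea with `torch.utils.checkpoint` (the small MLP is a hypothetical stand-in, not a CSWin block): activations inside the wrapped module are dropped during the forward pass and recomputed in the backward pass, trading extra compute for lower peak GPU memory.
```
import torch
from torch.utils.checkpoint import checkpoint

# hypothetical stand-in for one transformer stage
block = torch.nn.Sequential(
    torch.nn.Linear(64, 256),
    torch.nn.GELU(),
    torch.nn.Linear(256, 64),
)

x = torch.randn(8, 64, requires_grad=True)
y = checkpoint(block, x)   # intermediate activations inside `block` are not stored
y.sum().backward()         # they are recomputed here, saving memory at the cost of extra compute
```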
## Cite CSWin Transformer
```
@misc{dong2021cswin,
title={CSWin Transformer: A General Vision Transformer Backbone with Cross-Shaped Windows},
author={Xiaoyi Dong and Jianmin Bao and Dongdong Chen and Weiming Zhang and Nenghai Yu and Lu Yuan and Dong Chen and Baining Guo},
year={2021},
eprint={2107.00652},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
## Acknowledgement
This repository is built using the [timm](https://github.com/rwightman/pytorch-image-models) library and the [DeiT](https://github.com/facebookresearch/deit) repository.
## License
This project is licensed under the license found in the LICENSE file in the root directory of this source tree.
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct)
### Contact Information
For help or issues using CSWin Transformer, please submit a GitHub issue.
For other communications related to CSWin Transformer, please contact Jianmin Bao (`[email protected]`), Dong Chen (`[email protected]`).
|
CSWin-Transformer/README.md/0
|
{
"file_path": "CSWin-Transformer/README.md",
"repo_id": "CSWin-Transformer",
"token_count": 3254
}
| 203 |
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='CSWin',
embed_dim=64,
patch_size=4,
depth=[1, 2, 21, 1],
num_heads=[2,4,8,16],
split_size=[1,2,7,7],
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1),
decode_head=dict(
type='UPerHead',
in_channels=[96, 192, 384, 768],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=384,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
|
CSWin-Transformer/segmentation/configs/_base/upernet_cswin.py/0
|
{
"file_path": "CSWin-Transformer/segmentation/configs/_base/upernet_cswin.py",
"repo_id": "CSWin-Transformer",
"token_count": 708
}
| 204 |
site_name: ClimaX
repo_name: microsoft/ClimaX
repo_url: https://github.com/microsoft/ClimaX
markdown_extensions:
- attr_list
- tables
- admonition
- md_in_html
- pymdownx.details
- pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- pymdownx.highlight:
anchor_linenums: true
- pymdownx.inlinehilite
- pymdownx.snippets
- pymdownx.superfences
- pymdownx.critic
- pymdownx.caret
- pymdownx.keys
- pymdownx.mark
- pymdownx.tilde
- pymdownx.arithmatex:
generic: true
- toc:
permalink: "¤"
- pymdownx.emoji:
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_generator: !!python/name:materialx.emoji.to_svg
theme:
name: material
features:
- content.code.annotate
custom_dir: docs/overrides
font:
text: Lato
palette:
scheme: climax
logo: assets/images/climax-icon.png
icon:
repo: fontawesome/brands/github
favicon: assets/images/climax-icon.png
extra_css:
- stylesheets/extra.css
nav:
- Home: "index.md"
- Installation: "install.md"
- Usage: "usage.md"
- Code Reference:
- Pretraining: "reference/pretrain.md"
- Global Forecasting: "reference/global_forecast.md"
plugins:
- search
- mkdocstrings:
default_handler: python
enable_inventory: true
handlers:
python:
paths: [src]
import:
- https://docs.python.org/3/objects.inv
- https://pytorch.org/docs/stable/objects.inv
- https://pytorch.org/data/beta/objects.inv
- https://pytorch-lightning.readthedocs.io/en/stable/objects.inv
|
ClimaX/mkdocs.yml/0
|
{
"file_path": "ClimaX/mkdocs.yml",
"repo_id": "ClimaX",
"token_count": 720
}
| 205 |
year_strings = [
'185001010600-187001010000',
'187001010600-189001010000',
'189001010600-191001010000',
'191001010600-193001010000',
'193001010600-195001010000',
'195001010600-197001010000',
'197001010600-199001010000',
'199001010600-201001010000',
'201001010600-201501010000',
]
print(config)
rule download:
output:
"{dataset}/raw/{name}/{name}_{year_str}_raw.nc",
shell:
"wget https://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP/HAMMOZ-Consortium/MPI-ESM-1-2-HAM/historical/{config["
"run]}/6hrPlevPt/"
"{config[cmip_name]}/gn/{config[version]}/"
"{config[cmip_name]}_6hrPlevPt_MPI-ESM-1-2-HAM_historical_{config[run]}_gn_{wildcards.year_str}.nc "
"-O {wildcards.dataset}/raw/{config[name]}/{config[name]}_{wildcards.year_str}_raw.nc"
rule regrid:
input:
"{dataset}/raw/{name}/{name}_{year_str}_raw.nc"
output:
"{dataset}/{res}deg/{name}/{name}_{year_str}_{res}deg.nc.tmp"
shell:
"python ../../src/data_preprocessing/regrid.py \
--input_fns {input} \
--output_dir {wildcards.dataset}/{wildcards.res}deg/{wildcards.name} \
--ddeg_out {wildcards.res} \
--cmip 1 \
--rename {config[cmip_name]} {config[era_name]} \
--file_ending nc.tmp"
rule delete:
input:
expand("{{dataset}}/{res}deg/{{name}}/{{name}}_{{year_str}}_{res}deg.nc.tmp",
res=config['res']),
output:
expand("{{dataset}}/{res}deg/{{name}}/{{name}}_{{year_str}}_{res}deg.nc",
res=config['res'])
priority: 100
run:
for i, o in zip(input, output):
shell("mv {i} {o}")
# shell("rm {wildcards.dataset}/raw/{wildcards.name}/{wildcards.name}_{wildcards.year_str}_raw.nc"),
rule all:
input:
expand("{datadir}/{res}deg/{name}/{name}_{year_str}_{res}deg.nc",
datadir=config['datadir'], res=config['res'], name=config['name'], year_str=year_strings)
|
ClimaX/snakemake_configs/HAMMOZ/Snakefile/0
|
{
"file_path": "ClimaX/snakemake_configs/HAMMOZ/Snakefile",
"repo_id": "ClimaX",
"token_count": 1076
}
| 206 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: u_component_of_wind
cmip_name: ua
era_name: u
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_u_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_u_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 124
}
| 207 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from pytorch_lightning.cli import LightningCLI
from climax.climate_projection.module import ClimateProjectionModule
from climax.climate_projection.datamodule import ClimateBenchDataModule
def main():
# Initialize Lightning with the model and data modules, and instruct it to parse the config yml
cli = LightningCLI(
model_class=ClimateProjectionModule,
datamodule_class=ClimateBenchDataModule,
seed_everything_default=42,
save_config_overwrite=True,
run=False,
# auto_registry=True,
parser_kwargs={"parser_mode": "omegaconf", "error_handler": None},
)
os.makedirs(cli.trainer.default_root_dir, exist_ok=True)
normalization = cli.datamodule.dataset_train.out_transform
mean_norm, std_norm = normalization.mean, normalization.std
mean_denorm, std_denorm = -mean_norm / std_norm, 1 / std_norm
cli.model.set_denormalization(mean_denorm, std_denorm)
cli.model.set_lat_lon(*cli.datamodule.get_lat_lon())
cli.model.set_pred_range(0)
cli.model.set_val_clim(None)
cli.model.set_test_clim(cli.datamodule.get_test_clim())
# fit() runs the training
cli.trainer.fit(cli.model, datamodule=cli.datamodule)
# test the trained model
cli.trainer.test(cli.model, datamodule=cli.datamodule, ckpt_path='best')
if __name__ == "__main__":
main()
|
ClimaX/src/climax/climate_projection/train.py/0
|
{
"file_path": "ClimaX/src/climax/climate_projection/train.py",
"repo_id": "ClimaX",
"token_count": 561
}
| 208 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
NAME_TO_VAR = {
"2m_temperature": "t2m",
"10m_u_component_of_wind": "u10",
"10m_v_component_of_wind": "v10",
"mean_sea_level_pressure": "msl",
"surface_pressure": "sp",
"toa_incident_solar_radiation": "tisr",
"total_precipitation": "tp",
"land_sea_mask": "lsm",
"orography": "orography",
"lattitude": "lat2d",
"geopotential": "z",
"u_component_of_wind": "u",
"v_component_of_wind": "v",
"temperature": "t",
"relative_humidity": "r",
"specific_humidity": "q",
}
VAR_TO_NAME = {v: k for k, v in NAME_TO_VAR.items()}
SINGLE_LEVEL_VARS = [
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"mean_sea_level_pressure",
"surface_pressure",
"toa_incident_solar_radiation",
"total_precipitation",
"land_sea_mask",
"orography",
"lattitude",
]
PRESSURE_LEVEL_VARS = [
"geopotential",
"u_component_of_wind",
"v_component_of_wind",
"temperature",
"relative_humidity",
"specific_humidity",
]
DEFAULT_PRESSURE_LEVELS = [50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 850, 925, 1000]
NAME_LEVEL_TO_VAR_LEVEL = {}
for var in SINGLE_LEVEL_VARS:
NAME_LEVEL_TO_VAR_LEVEL[var] = NAME_TO_VAR[var]
for var in PRESSURE_LEVEL_VARS:
for l in DEFAULT_PRESSURE_LEVELS:
NAME_LEVEL_TO_VAR_LEVEL[var + "_" + str(l)] = NAME_TO_VAR[var] + "_" + str(l)
VAR_LEVEL_TO_NAME_LEVEL = {v: k for k, v in NAME_LEVEL_TO_VAR_LEVEL.items()}
BOUNDARIES = {
'NorthAmerica': { # 8x14
'lat_range': (15, 65),
'lon_range': (220, 300)
},
'SouthAmerica': { # 14x10
'lat_range': (-55, 20),
'lon_range': (270, 330)
},
'Europe': { # 6x8
'lat_range': (30, 65),
'lon_range': (0, 40)
},
'SouthAsia': { # 10, 14
'lat_range': (-15, 45),
'lon_range': (25, 110)
},
'EastAsia': { # 10, 12
'lat_range': (5, 65),
'lon_range': (70, 150)
},
'Australia': { # 10x14
'lat_range': (-50, 10),
'lon_range': (100, 180)
},
'Global': { # 32, 64
'lat_range': (-90, 90),
'lon_range': (0, 360)
}
}
def get_region_info(region, lat, lon, patch_size):
region = BOUNDARIES[region]
lat_range = region['lat_range']
lon_range = region['lon_range']
lat = lat[::-1] # -90 to 90 from south (bottom) to north (top)
h, w = len(lat), len(lon)
lat_matrix = np.expand_dims(lat, axis=1).repeat(w, axis=1)
lon_matrix = np.expand_dims(lon, axis=0).repeat(h, axis=0)
valid_cells = (lat_matrix >= lat_range[0]) & (lat_matrix <= lat_range[1]) & (lon_matrix >= lon_range[0]) & (lon_matrix <= lon_range[1])
h_ids, w_ids = np.nonzero(valid_cells)
h_from, h_to = h_ids[0], h_ids[-1]
w_from, w_to = w_ids[0], w_ids[-1]
patch_idx = -1
p = patch_size
valid_patch_ids = []
min_h, max_h = 1e5, -1e5
min_w, max_w = 1e5, -1e5
for i in range(0, h, p):
for j in range(0, w, p):
patch_idx += 1
if (i >= h_from) & (i + p - 1 <= h_to) & (j >= w_from) & (j + p - 1 <= w_to):
valid_patch_ids.append(patch_idx)
min_h = min(min_h, i)
max_h = max(max_h, i + p - 1)
min_w = min(min_w, j)
max_w = max(max_w, j + p - 1)
return {
'patch_ids': valid_patch_ids,
'min_h': min_h,
'max_h': max_h,
'min_w': min_w,
'max_w': max_w
}
|
ClimaX/src/climax/utils/data_utils.py/0
|
{
"file_path": "ClimaX/src/climax/utils/data_utils.py",
"repo_id": "ClimaX",
"token_count": 1843
}
| 209 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer, equal_lr
from models.networks.architecture import Attention
import util.util as util
class MultiscaleDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--netD_subarch', type=str, default='n_layer',
help='architecture of each discriminator')
parser.add_argument('--num_D', type=int, default=2,
help='number of discriminators to be used in multiscale')
opt, _ = parser.parse_known_args()
# define properties of each discriminator of the multiscale discriminator
subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',
'models.networks.discriminator')
subnetD.modify_commandline_options(parser, is_train)
return parser
def __init__(self, opt, stage1=False):
super().__init__()
self.opt = opt
self.stage1 = stage1
for i in range(opt.num_D):
subnetD = self.create_single_discriminator(opt)
self.add_module('discriminator_%d' % i, subnetD)
def create_single_discriminator(self, opt):
subarch = opt.netD_subarch
if subarch == 'n_layer':
netD = NLayerDiscriminator(opt, stage1=self.stage1)
else:
raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
return netD
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
segs = []
cam_logits = []
get_intermediate_features = not self.opt.no_ganFeat_loss
for name, D in self.named_children():
out, cam_logit = D(input)
cam_logits.append(cam_logit)
if not get_intermediate_features:
out = [out]
result.append(out)
input = self.downsample(input)
return result, segs, cam_logits
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--n_layers_D', type=int, default=4,
help='# layers in each discriminator')
return parser
def __init__(self, opt, stage1=False):
super().__init__()
self.opt = opt
self.stage1 = stage1
kw = 4
#padw = int(np.ceil((kw - 1.0) / 2))
padw = int((kw - 1.0) / 2)
nf = opt.ndf
input_nc = self.compute_D_input_nc(opt)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, False)]]
for n in range(1, opt.n_layers_D):
nf_prev = nf
nf = min(nf * 2, 512)
stride = 1 if n == opt.n_layers_D - 1 else 2
if (((not stage1) and opt.use_attention) or (stage1 and opt.use_attention_st1)) and n == opt.n_layers_D - 1:
self.attn = Attention(nf_prev, 'spectral' in opt.norm_D)
if n == opt.n_layers_D - 1 and (not stage1):
dec = []
nc_dec = nf_prev
for _ in range(opt.n_layers_D - 1):
dec += [nn.Upsample(scale_factor=2),
norm_layer(nn.Conv2d(nc_dec, int(nc_dec//2), kernel_size=3, stride=1, padding=1)),
nn.LeakyReLU(0.2, False)]
nc_dec = int(nc_dec // 2)
dec += [nn.Conv2d(nc_dec, opt.semantic_nc, kernel_size=3, stride=1, padding=1)]
self.dec = nn.Sequential(*dec)
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
stride=stride, padding=padw)),
nn.LeakyReLU(0.2, False)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if opt.D_cam > 0:
mult = min(2 ** (opt.n_layers_D - 1), 8)
if opt.eqlr_sn:
self.gap_fc = equal_lr(nn.Linear(opt.ndf * mult, 1, bias=False))
self.gmp_fc = equal_lr(nn.Linear(opt.ndf * mult, 1, bias=False))
else:
self.gap_fc = nn.utils.spectral_norm(nn.Linear(opt.ndf * mult, 1, bias=False))
self.gmp_fc = nn.utils.spectral_norm(nn.Linear(opt.ndf * mult, 1, bias=False))
self.conv1x1 = nn.Conv2d(opt.ndf * mult * 2, opt.ndf * mult, kernel_size=1, stride=1, bias=True)
self.leaky_relu = nn.LeakyReLU(0.2, True)
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
def compute_D_input_nc(self, opt):
input_nc = opt.label_nc + opt.output_nc
if opt.contain_dontcare_label:
input_nc += 1
return input_nc
def forward(self, input):
results = [input]
seg = None
cam_logit = None
for name, submodel in self.named_children():
if 'model' not in name:
continue
if name == 'model3':
if ((not self.stage1) and self.opt.use_attention) or (self.stage1 and self.opt.use_attention_st1):
x = self.attn(results[-1])
else:
x = results[-1]
else:
x = results[-1]
intermediate_output = submodel(x)
if self.opt.D_cam > 0 and name == 'model3':
gap = F.adaptive_avg_pool2d(intermediate_output, 1)
gap_logit = self.gap_fc(gap.view(intermediate_output.shape[0], -1))
gap_weight = list(self.gap_fc.parameters())[0]
gap = intermediate_output * gap_weight.unsqueeze(2).unsqueeze(3)
gmp = F.adaptive_max_pool2d(intermediate_output, 1)
gmp_logit = self.gmp_fc(gmp.view(intermediate_output.shape[0], -1))
gmp_weight = list(self.gmp_fc.parameters())[0]
gmp = intermediate_output * gmp_weight.unsqueeze(2).unsqueeze(3)
cam_logit = torch.cat([gap_logit, gmp_logit], 1)
intermediate_output = torch.cat([gap, gmp], 1)
intermediate_output = self.leaky_relu(self.conv1x1(intermediate_output))
results.append(intermediate_output)
get_intermediate_features = not self.opt.no_ganFeat_loss
if get_intermediate_features:
retu = results[1:]
else:
retu = results[-1]
if seg is None:
return retu, cam_logit
else:
return retu, seg, cam_logit
|
CoCosNet/models/networks/discriminator.py/0
|
{
"file_path": "CoCosNet/models/networks/discriminator.py",
"repo_id": "CoCosNet",
"token_count": 3780
}
| 210 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import numpy as np
import torch
import torchvision.utils as vutils
import sys
from collections import OrderedDict
from options.train_options import TrainOptions
import data
from util.iter_counter import IterationCounter
from util.util import print_current_errors
from trainers.pix2pix_trainer import Pix2PixTrainer
import torch.nn.functional as F
# parse options
opt = TrainOptions().parse()
# print options to help debugging
print(' '.join(sys.argv))
#torch.manual_seed(0)
# load the dataset
dataloader = data.create_dataloader(opt)
len_dataloader = len(dataloader)
dataloader.dataset[11]
# create tool for counting iterations
iter_counter = IterationCounter(opt, len(dataloader))
# create trainer for our model
trainer = Pix2PixTrainer(opt, resume_epoch=iter_counter.first_epoch)
save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output', opt.name)
for epoch in iter_counter.training_epochs():
opt.epoch = epoch
if not opt.maskmix:
print('inject nothing')
elif opt.maskmix and opt.noise_for_mask and epoch > opt.mask_epoch:
print('inject noise')
else:
print('inject mask')
print('real_reference_probability is :{}'.format(dataloader.dataset.real_reference_probability))
print('hard_reference_probability is :{}'.format(dataloader.dataset.hard_reference_probability))
iter_counter.record_epoch_start(epoch)
for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter):
iter_counter.record_one_iteration()
#use for Domain adaptation loss
p = min(float(i + (epoch - 1) * len_dataloader) / 50 / len_dataloader, 1)
alpha = 2. / (1. + np.exp(-10 * p)) - 1
# Training
# train generator
if i % opt.D_steps_per_G == 0:
trainer.run_generator_one_step(data_i, alpha=alpha)
# train discriminator
trainer.run_discriminator_one_step(data_i)
if iter_counter.needs_printing():
losses = trainer.get_latest_losses()
try:
print_current_errors(opt, epoch, iter_counter.epoch_iter,
losses, iter_counter.time_per_iter)
except OSError as err:
print(err)
if iter_counter.needs_displaying():
if not os.path.exists(save_root + opt.name):
os.makedirs(save_root + opt.name)
imgs_num = data_i['label'].shape[0]
if opt.dataset_mode == 'celebahq':
data_i['label'] = data_i['label'][:,::2,:,:]
elif opt.dataset_mode == 'celebahqedge':
data_i['label'] = data_i['label'][:,:1,:,:]
elif opt.dataset_mode == 'deepfashion':
data_i['label'] = data_i['label'][:,:3,:,:]
if data_i['label'].shape[1] == 3:
label = data_i['label']
else:
label = data_i['label'].expand(-1, 3, -1, -1).float() / data_i['label'].max()
cycleshow = None
if opt.warp_cycle_w > 0:
cycleshow = trainer.out['warp_cycle'] if opt.warp_patch else F.interpolate(trainer.out['warp_cycle'], scale_factor=opt.warp_stride)
if opt.two_cycle:
cycleshow = torch.cat((cycleshow, F.interpolate(trainer.out['warp_i2r'], scale_factor=opt.warp_stride), F.interpolate(trainer.out['warp_i2r2i'], scale_factor=opt.warp_stride)), 0)
if cycleshow is not None:
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), trainer.out['warp_out'].cpu(), cycleshow.cpu(), trainer.get_latest_generated().data.cpu(), data_i['image'].cpu()), 0)
else:
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), trainer.out['warp_out'].cpu(), trainer.get_latest_generated().data.cpu(), data_i['image'].cpu()), 0)
try:
vutils.save_image(imgs, save_root + opt.name + '/' + str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '.png',
nrow=imgs_num, padding=0, normalize=True)
except OSError as err:
print(err)
if iter_counter.needs_saving():
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
iter_counter.record_current_iter()
except OSError as err:
print(err)
trainer.update_learning_rate(epoch)
iter_counter.record_epoch_end()
if epoch % opt.save_epoch_freq == 0 or \
epoch == iter_counter.total_epochs:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
trainer.save(epoch)
except OSError as err:
print(err)
print('Training was successfully finished.')
|
CoCosNet/train.py/0
|
{
"file_path": "CoCosNet/train.py",
"repo_id": "CoCosNet",
"token_count": 2325
}
| 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp
from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
from tree_sitter import Language, Parser
import os
root_dir = os.path.dirname(__file__)
dfg_function = {
'python': DFG_python,
'java': DFG_java,
'ruby': DFG_ruby,
'go': DFG_go,
'php': DFG_php,
'javascript': DFG_javascript,
'c_sharp': DFG_csharp,
}
def calc_dataflow_match(references, candidate, lang):
return corpus_dataflow_match([references], [candidate], lang)
def corpus_dataflow_match(references, candidates, lang):
LANGUAGE = Language(root_dir + '/parser/my-languages.so', lang)
parser = Parser()
parser.set_language(LANGUAGE)
parser = [parser, dfg_function[lang]]
match_count = 0
total_count = 0
for i in range(len(candidates)):
references_sample = references[i]
candidate = candidates[i]
for reference in references_sample:
try:
candidate = remove_comments_and_docstrings(candidate, 'java')
except:
pass
try:
reference = remove_comments_and_docstrings(reference, 'java')
except:
pass
cand_dfg = get_data_flow(candidate, parser)
ref_dfg = get_data_flow(reference, parser)
normalized_cand_dfg = normalize_dataflow(cand_dfg)
normalized_ref_dfg = normalize_dataflow(ref_dfg)
if len(normalized_ref_dfg) > 0:
total_count += len(normalized_ref_dfg)
for dataflow in normalized_ref_dfg:
if dataflow in normalized_cand_dfg:
match_count += 1
normalized_cand_dfg.remove(dataflow)
if total_count == 0:
print(
"WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.")
return 0
score = match_count / total_count
return score
def get_data_flow(code, parser):
try:
tree = parser[0].parse(bytes(code, 'utf8'))
root_node = tree.root_node
tokens_index = tree_to_token_index(root_node)
code = code.split('\n')
code_tokens = [index_to_code_token(x, code) for x in tokens_index]
index_to_code = {}
for idx, (index, code) in enumerate(zip(tokens_index, code_tokens)):
index_to_code[index] = (idx, code)
try:
DFG, _ = parser[1](root_node, index_to_code, {})
except:
DFG = []
DFG = sorted(DFG, key=lambda x: x[1])
indexs = set()
for d in DFG:
if len(d[-1]) != 0:
indexs.add(d[1])
for x in d[-1]:
indexs.add(x)
new_DFG = []
for d in DFG:
if d[1] in indexs:
new_DFG.append(d)
codes = code_tokens
dfg = new_DFG
except:
codes = code.split()
dfg = []
# merge nodes
dic = {}
for d in dfg:
if d[1] not in dic:
dic[d[1]] = d
else:
dic[d[1]] = (d[0], d[1], d[2], list(set(dic[d[1]][3] + d[3])), list(set(dic[d[1]][4] + d[4])))
DFG = []
for d in dic:
DFG.append(dic[d])
dfg = DFG
return dfg
def normalize_dataflow_item(dataflow_item):
var_name = dataflow_item[0]
var_pos = dataflow_item[1]
relationship = dataflow_item[2]
par_vars_name_list = dataflow_item[3]
par_vars_pos_list = dataflow_item[4]
var_names = list(set(par_vars_name_list + [var_name]))
norm_names = {}
for i in range(len(var_names)):
norm_names[var_names[i]] = 'var_' + str(i)
norm_var_name = norm_names[var_name]
relationship = dataflow_item[2]
norm_par_vars_name_list = [norm_names[x] for x in par_vars_name_list]
return (norm_var_name, relationship, norm_par_vars_name_list)
def normalize_dataflow(dataflow):
var_dict = {}
i = 0
normalized_dataflow = []
for item in dataflow:
var_name = item[0]
relationship = item[2]
par_vars_name_list = item[3]
for name in par_vars_name_list:
if name not in var_dict:
var_dict[name] = 'var_' + str(i)
i += 1
if var_name not in var_dict:
var_dict[var_name] = 'var_' + str(i)
i += 1
normalized_dataflow.append((var_dict[var_name], relationship, [var_dict[x] for x in par_vars_name_list]))
return normalized_dataflow
|
CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/dataflow_match.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/dataflow_match.py",
"repo_id": "CodeBERT",
"token_count": 2439
}
| 212 |
import os
import torch
import logging
import argparse
import random
import numpy as np
from tqdm import tqdm
import multiprocessing
import time
from itertools import cycle
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from models import build_or_load_gen_model
from configs import add_args, set_seed, set_dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from utils import CommentClsDataset, SimpleClsDataset
from sklearn.metrics import f1_score, accuracy_score
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_loaders(data_files, args, tokenizer, pool, eval=False):
def fn(features):
return features
global_rank = args.global_rank
for data_file in data_files:
if args.raw_input:
dataset = SimpleClsDataset(tokenizer, pool, args, data_file)
else:
dataset = CommentClsDataset(tokenizer, pool, args, data_file)
data_len = len(dataset)
if global_rank == 0:
logger.info(f"Data length: {data_len}.")
if eval:
sampler = SequentialSampler(dataset)
else:
sampler = DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=args.train_batch_size if not eval else args.eval_batch_size, \
num_workers=args.cpu_count, collate_fn=fn)
yield dataset, sampler, dataloader
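# ---------------------------------------------------------------------------
# Usage note (sketch, not part of the original file): get_loaders is a
# generator, so only one chunk file is tokenized and held in memory at a time.
# A single evaluation loader can be pulled out with next(), mirroring the
# calls in main():
#
#   _, _, valid_dataloader = next(get_loaders([valid_file], args, tokenizer, pool, eval=True))
# ---------------------------------------------------------------------------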
def eval_epoch_acc(args, eval_dataloader, model, tokenizer):
# Start evaluating model
logger.info(" " + "***** Running acc evaluation *****")
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
local_rank = 0
pred, gold = [], []
with torch.no_grad():
for step, examples in enumerate(eval_dataloader, 1):
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(local_rank)
source_mask = source_ids.ne(tokenizer.pad_id)
logits = model(
cls=True,
input_ids=source_ids,
labels=None,
attention_mask=source_mask
)
prediction = torch.argmax(logits, dim=-1).cpu().numpy()
pred.extend(prediction)
gold.extend([ex.y for ex in examples])
acc = accuracy_score(gold, pred)
return acc
def save_model(model, optimizer, scheduler, output_dir, config):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
config.save_pretrained(output_dir)
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
output_optimizer_file = os.path.join(output_dir, "optimizer.pt")
torch.save(
optimizer.state_dict(),
output_optimizer_file,
_use_new_zipfile_serialization=False,
)
output_scheduler_file = os.path.join(output_dir, "scheduler.pt")
torch.save(
scheduler.state_dict(),
output_scheduler_file,
_use_new_zipfile_serialization=False,
)
def main(args):
dist.init_process_group(backend="nccl")
local_rank = dist.get_rank() % args.gpu_per_node
args.global_rank = local_rank + args.node_index * args.gpu_per_node
args.local_rank = local_rank
args.world_size = dist.get_world_size()
logger.warning("Process rank: %s, global rank: %s, world size: %s, bs: %s",
args.local_rank, args.global_rank, \
torch.distributed.get_world_size(), \
args.train_batch_size)
torch.cuda.set_device(local_rank)
# t0 = time.time()
# set_dist(args)
set_seed(args)
config, model, tokenizer = build_or_load_gen_model(args)
# load last model
if os.path.exists("{}/checkpoints-last/pytorch_model.bin".format(args.output_dir)):
model.load_state_dict(
torch.load("{}/checkpoints-last/pytorch_model.bin".format(args.output_dir))
)
model = DDP(model.cuda(), device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
pool = multiprocessing.Pool(args.cpu_count)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
args.warmup_steps = int(args.train_steps * 0.1)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.train_steps,
)
if os.path.exists("{}/checkpoints-last/optimizer.pt".format(args.output_dir)):
optimizer.load_state_dict(
torch.load(
"{}/checkpoints-last/optimizer.pt".format(args.output_dir),
map_location="cpu",
)
)
scheduler.load_state_dict(
torch.load(
"{}/checkpoints-last/scheduler.pt".format(args.output_dir),
map_location="cpu",
)
)
global_step = 0
save_steps = args.save_steps
train_file = args.train_filename
valid_file = args.dev_filename
    if os.path.isdir(train_file):
        train_files = [file for file in os.listdir(train_file) if file.startswith("cls-train-chunk") and file.endswith(".jsonl")]
        # join with the directory here; a single file passed directly is
        # already a full path and must not be joined with itself
        train_files = [os.path.join(train_file, file) for file in train_files]
    else:
        train_files = [train_file]
    logger.warning("Train files: %s", train_files)
    random.seed(args.seed)
    random.shuffle(train_files)
valid_files = [valid_file]
for epoch in range(1, args.train_epochs + 1):
# set seed for reproducible data split
save_seed = args.seed
args.seed += epoch
set_seed(args)
args.seed = save_seed
model.train()
nb_tr_examples, nb_tr_steps, tr_loss = 0, 0, 0
for _, _, train_dataloader in get_loaders(train_files, args, tokenizer, pool): # WARNING: this is an iterator, to save memory
for step, examples in enumerate(train_dataloader, 1):
if step == 1:
ex = examples[0]
logger.info(f"batch size: {len(examples)}")
logger.info(f"example source: {tokenizer.convert_ids_to_tokens(ex.source_ids)}")
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(local_rank)
ys = torch.tensor(
[ex.y for ex in examples], dtype=torch.long
).to(local_rank)
source_mask = source_ids.ne(tokenizer.pad_id)
loss = model(
cls=True,
input_ids=source_ids,
labels=ys,
attention_mask=source_mask
)
if args.gpu_per_node > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if nb_tr_steps % args.gradient_accumulation_steps == 0:
# Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.global_rank == 0 and global_step % args.log_steps == 0:
train_loss = round(
tr_loss * args.gradient_accumulation_steps / nb_tr_steps,
4,
)
logger.info(
"step {}/{}: Train loss {}".format(
global_step,
args.train_steps,
round(train_loss, 3),
)
)
if args.global_rank == 0 and global_step == args.train_steps:
# end training
_, _, valid_dataloader = next(get_loaders(valid_files, args, tokenizer, pool, eval=True))
acc = eval_epoch_acc(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-last" + "-" + str(acc)[:5])
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(f"Reach max steps {args.train_steps}.")
time.sleep(5)
return
if args.global_rank == 0 and \
global_step % save_steps == 0 and \
nb_tr_steps % args.gradient_accumulation_steps == 0:
_, _, valid_dataloader = next(get_loaders(valid_files, args, tokenizer, pool, eval=True))
acc = eval_epoch_acc(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-" + str(global_step) + "-" + str(acc)[:5])
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(
"Save the {}-step model and optimizer into {}".format(
global_step, output_dir
)
)
time.sleep(5)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = add_args(parser)
args.cpu_count = multiprocessing.cpu_count()
# remove long tokenization warning. ref: https://github.com/huggingface/transformers/issues/991
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
logger.info(args)
main(args)
logger.info("Training finished.")
# torch.multiprocessing.spawn(main, args=(args,), nprocs=torch.cuda.device_count())
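# ---------------------------------------------------------------------------
# Launch sketch (an assumption, not part of the original file): the script
# initializes torch.distributed with the NCCL backend, so it is meant to be
# started with one process per GPU, e.g. via torchrun. The flag names below
# are guesses based on the attributes used above (args.train_filename, ...)
# and are actually defined in configs.add_args:
#
#   torchrun --nproc_per_node=4 run_finetune_cls.py \
#       --train_filename <train_dir_or_file> --dev_filename <valid.jsonl> \
#       --output_dir <output_dir> --train_batch_size 12 --eval_batch_size 12
# ---------------------------------------------------------------------------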
|
CodeBERT/CodeReviewer/code/run_finetune_cls.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/run_finetune_cls.py",
"repo_id": "CodeBERT",
"token_count": 5419
}
| 213 |
import re, json
import os, random
import torch, logging
from copy import deepcopy as cp
from torch.utils.data import Dataset
from tokenizers import ByteLevelBPETokenizer
from transformers import T5Tokenizer, RobertaTokenizer
import nltk
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class MyTokenizer(object):
"""
Wrapper for ByteLevelBPETokenizer
"""
def __init__(self, vocab=None, merges=None, **kwargs):
self.tokenizer = ByteLevelBPETokenizer(vocab, merges, **kwargs)
self.update_id2token()
@staticmethod
def from_pretrained(path):
vocabp = os.path.join(path, "vocab.json")
mergesp = os.path.join(path, "merges.txt")
mytoken = MyTokenizer(vocabp, mergesp)
return mytoken
def update_id2token(self):
vocab = self.tokenizer.get_vocab()
self.id2token = {vocab[token]: token for token in vocab}
def add_special_tokens(self, dic):
for values in dic.values():
self.tokenizer.add_special_tokens(values)
self.update_id2token()
def convert_ids_to_tokens(self, ids):
vocab = self.id2token
return [vocab[i] for i in ids]
    def decode(self, ids, **kwargs):  ##### to be updated
tokens = self.convert_ids_to_tokens(ids)
return " ".join(tokens)
def encode(self, text, **kwargs):
text = text.encode("ascii", errors="ignore").decode("ascii")
return self.tokenizer.encode(text).ids
def get_vocab(self):
return self.tokenizer.get_vocab()
def __len__(self):
return len(self.tokenizer.get_vocab())
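# ---------------------------------------------------------------------------
# Usage sketch (an assumption, not part of the original file): MyTokenizer
# wraps a byte-level BPE vocabulary stored as vocab.json + merges.txt in a
# single directory.
#
#   tok = MyTokenizer.from_pretrained("path/to/tokenizer_dir")
#   ids = tok.encode("def add(a, b): return a + b")
#   print(tok.convert_ids_to_tokens(ids))
# ---------------------------------------------------------------------------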
class RefineFeatures(object):
def __init__(self, example_id, source_ids, target_ids):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
class RefineDataset(Dataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.tokenizer = tokenizer
self.args = args
logger.info("Reading examples from {}".format(file_path))
examples = [json.loads(line) for line in open(file_path)]
for i in range(len(examples)):
if "id" not in examples[i]:
examples[i]["id"] = i
if samplenum > 0:
examples = examples[:samplenum]
logger.info(f"Tokenize examples: {file_path}")
self.feats = pool.map(self.tokenize, \
[(example, tokenizer, args) for example in examples])
def tokenize(self, item):
example, tokenizer, args = item
oldlines = example["old"].split("\n")
newlines = example["new"].split("\n")
oldlines = [line[1:].strip() for line in oldlines]
newlines = [line[1:].strip() for line in newlines]
oldlines = "\n".join(oldlines)
newlines = "\n".join(newlines)
oldlines = "<add>" + oldlines.replace("\n", "<add>")
newlines = "<add>" + newlines.replace("\n", "<add>")
comment = example["comment"]
srcids = self.encode_remove(tokenizer, oldlines, args)
srcids += [tokenizer.msg_id] + self.encode_remove(tokenizer, comment, args)
tgtids = self.encode_remove(tokenizer, newlines, args)
srcids, tgtids = self.pad_assert(srcids, tgtids, args, tokenizer)
return RefineFeatures(example["id"], srcids, tgtids)
@staticmethod
def process_pred_gold(pred, gold):
gold = gold.split("\n")
gold = [line[1:].strip() for line in gold]
gold = " ".join(gold)
pred = " ".join(pred.split())
pred = pred.replace("<add> ", "")
return pred, gold
def pad_assert(self, source_ids, target_ids, args, tokenizer):
source_ids = source_ids[:args.max_source_length - 2]
source_ids = [tokenizer.bos_id] + source_ids + [tokenizer.eos_id]
pad_len = args.max_source_length - len(source_ids)
source_ids += [tokenizer.pad_id] * pad_len
target_ids = target_ids[:args.max_target_length - 2]
target_ids = [tokenizer.bos_id] + target_ids + [tokenizer.eos_id]
pad_len = args.max_target_length - len(target_ids)
target_ids += [tokenizer.pad_id] * pad_len
assert len(source_ids) == args.max_source_length, "Not equal length."
assert len(target_ids) == args.max_target_length, "Not equal length."
return source_ids, target_ids
def encode_remove(self, tokenizer, text, args):
text = tokenizer.encode(text, max_length=args.max_source_length, truncation=True)
if type(tokenizer) == T5Tokenizer:
return text[:-1]
elif type(tokenizer) == RobertaTokenizer:
return text[1:-1]
elif type(tokenizer) == MyTokenizer:
return text
else:
raise NotImplementedError
def __len__(self):
return len(self.feats)
def __getitem__(self, i):
return self.feats[i]
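# ---------------------------------------------------------------------------
# Worked example (hand-made record, an assumption about the jsonl schema):
# RefineDataset strips the leading diff marker of every line, joins the lines
# with the special <add> token, and appends the review comment after
# tokenizer.msg_id.
#
#   old = "-    return a+b\n-    # buggy"
#   =>  oldlines -> "<add>return a+b<add># buggy"
#   source = BPE(oldlines) + [msg_id] + BPE(comment)
#   target = BPE(newlines rendered the same way)
# ---------------------------------------------------------------------------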
class SimpleRefineDataset(RefineDataset):
def tokenize(self, item):
example, tokenizer, args = item
oldlines = example["old"].split("\n")
newlines = example["new"].split("\n")
oldlines = [line[1:].strip() for line in oldlines]
newlines = [line[1:].strip() for line in newlines]
oldlines = " ".join(oldlines)
newlines = " ".join(newlines)
comment = example["comment"]
srcids = self.encode_remove(tokenizer, oldlines, args)
srcids += [tokenizer.msg_id] + self.encode_remove(tokenizer, comment, args)
tgtids = self.encode_remove(tokenizer, newlines, args)
srcids, tgtids = self.pad_assert(srcids, tgtids, args, tokenizer)
return RefineFeatures(example["id"], srcids, tgtids)
@staticmethod
def process_pred_gold(pred, gold):
gold = gold.split("\n")
gold = [line[1:].strip() for line in gold]
gold = " ".join(gold)
pred = " ".join(pred.split())
return pred, gold
class Seq2SeqDataset(RefineDataset):
def tokenize(self, item):
example, tokenizer, args = item
inputs, outputs = example["old"], example["new"]
inputs = " ".join(inputs.split())
outputs = " ".join(outputs.split())
srcids = self.encode_remove(tokenizer, inputs, args)
tgtids = self.encode_remove(tokenizer, outputs, args)
srcids, tgtids = self.pad_assert(srcids, tgtids, args, tokenizer)
return RefineFeatures(example["id"], srcids, tgtids)
@staticmethod
def process_pred_gold(pred, gold):
gold = " ".join(gold.split())
pred = " ".join(pred.split())
return pred, gold
class TextDataset(Dataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.cnt = 0
self.tokenizer = tokenizer
self.args = args
if isinstance(tokenizer, MyTokenizer):
tokenizer_type = "mytok"
elif isinstance(tokenizer, T5Tokenizer):
tokenizer_type = ""
elif isinstance(tokenizer, RobertaTokenizer):
tokenizer_type = "rb"
else:
tokenizer_type = "unk"
savep = file_path.replace(".jsonl", tokenizer_type + ".exps")
# savep = "/home/v-zhuoli1/lzzz/processed/chunk_25.exps"
if os.path.exists(savep):
logger.info("Loading examples from {}".format(savep))
examples = torch.load(savep)
else:
logger.info("Reading examples from {}".format(file_path))
examples = read_review_examples(file_path, samplenum, tokenizer)
logger.info(f"Tokenize examples: {file_path}")
examples = pool.map(self.tokenize, \
[(example, tokenizer, args) for example in examples])
torch.save(examples, savep)
logger.info("Convert examples to features...")
self.set_start_end_ids(examples)
self.featss = pool.map(self.convert_examples_to_features, \
[(example, tokenizer, args) for example in examples])
self.feats = [feat for feats in self.featss for feat in feats] # expand the lists
def __len__(self):
return len(self.feats)
def __getitem__(self, i):
return self.feats[i]
def reset_len(self, data_len):
assert len(self.feats) >= data_len
self.feats = self.feats[:data_len]
def set_start_end_ids(self, examples):
for example in examples:
labels = example.labels
start_id = 0
end_id = len(labels) - 1
for i, label in enumerate(labels):
if label != -100: # find the first label
start_id = i
break
for i in range(len(labels) - 1, -1, -1):
label = labels[i]
if label != -100:
end_id = i
break
example.start_id = start_id
example.end_id = end_id
def tokenize(self, item):
example, tokenizer, args = item
example.input = self.encode_remove(tokenizer, example.input, args)
e0id = tokenizer.special_dict["<e0>"]
inputs = " ".join(str(id) for id in example.input)
lines = inputs.split(" " + str(e0id) + " ")
lines = [
[int(v) for v in line.split(" ") if len(v) > 0] for line in lines
]
        lens = [len(line) for line in lines]
        # if 0 in lens:
        #     logger.info("Warning: empty line in an example.")
curlen = len(lens) + sum(lens)
left, right = 0, len(lines)
while curlen > args.max_source_length - 2:
if left % 2 == 0:
curlen -= 1 + len(lines[left])
left += 1
else:
right -= 1
curlen -= 1 + len(lines[right])
lines = lines[left:right]
labels = example.labels[left:right]
assert len(lines) + sum(map(len, lines)) <= args.max_source_length - 2, "Too long inputs in TextDataset.tokenize."
if len(lines) != len(labels):
logger.info("Not equal length in TextDataset.tokenize.")
lines = lines[:len(labels)]
labels = labels[:len(lines)]
example.lines = lines
example.labels = labels
example.msg = self.encode_remove(tokenizer, example.msg, args)
return example
def convert_examples_to_features(self, item):
example, _, _ = item
if len(example.msg) > 0:
exs = []
for _ in range(3): # up sampling
if random.random() < 0.5:
exs.append(self.genmsg_example(item))
else:
exs.append(self.daemsg_example(item))
return exs
if random.random() < 0.5:
return [self.encoder_example(item)]
return [self.decoder_example(item)]
def encoder_example(self, item):
example, tokenizer, args = item
lines = example.lines
labels = example.labels
target_ids = [tokenizer.pad_id] * args.max_target_length
source_ids, input_labels = [], []
for i, (line, label) in enumerate(zip(lines, labels)):
if i == example.start_id:
source_ids.append(tokenizer.start_id)
input_labels.append(-100)
if label != -100: # only insert special tokens at diffs, not context
source_ids.append(tokenizer.mask_id)
input_labels.append(label)
source_ids.extend(line)
input_labels.extend([-100] * len(line))
if i == example.end_id:
source_ids.append(tokenizer.end_id)
input_labels.append(-100)
assert len(input_labels) == len(source_ids), "Not equal length."
assert len(input_labels) <= args.max_source_length, f"Too long inputs: {len(input_labels)}."
source_ids = source_ids[:args.max_source_length - 2]
input_labels = input_labels[:args.max_source_length - 2]
source_ids = [tokenizer.bos_id] + source_ids + [tokenizer.eos_id]
input_labels = [-100] + input_labels + [-100]
pad_len = args.max_source_length - len(source_ids)
source_ids += [tokenizer.pad_id] * pad_len
input_labels += [-100] * pad_len
new_input_labels = []
map_dict = {0: tokenizer.del_id, 1: tokenizer.add_id, 2: tokenizer.keep_id}
for label in input_labels:
if label == -100:
new_input_labels.append(-100)
else:
new_input_labels.append(map_dict[label])
input_labels = new_input_labels
assert len(source_ids) == args.max_source_length, "Not equal length."
assert len(input_labels) == args.max_source_length, "Not equal length."
return ReviewFeatures(example.idx, source_ids, input_labels, target_ids, type="label")
def decoder_example(self, item):
example, tokenizer, args = item
lines = example.lines
labels = example.labels
input_labels = [-100] * args.max_source_length
source_ids, target_ids = [], []
SPECIAL_ID = 0
mask_idxs = random.choices(range(len(lines)), k=int(len(lines) * args.mask_rate))
id_dict = {0: tokenizer.del_id, 1: tokenizer.add_id, 2: tokenizer.keep_id}
for i, (line, label) in enumerate(zip(lines, labels)):
if i == example.start_id:
source_ids.append(tokenizer.start_id)
if label in id_dict:
source_ids.append(id_dict[label])
if i in mask_idxs:
source_ids.append(tokenizer.special_dict[f"<e{SPECIAL_ID}>"])
target_ids.append(tokenizer.special_dict[f"<e{SPECIAL_ID}>"])
target_ids.extend(line)
if SPECIAL_ID < 99: # only 0-99 ids in vocab
SPECIAL_ID += 1
else:
source_ids.extend(line)
if i == example.end_id:
source_ids.append(tokenizer.end_id)
source_ids, target_ids = self.pad_assert(source_ids, target_ids, args, tokenizer)
return ReviewFeatures(example.idx, source_ids, input_labels, target_ids, type="line")
def genmsg_example(self, item):
example, tokenizer, args = item
lines = example.lines
labels = example.labels
input_labels = [-100] * args.max_source_length
source_ids, target_ids = [], []
id_dict = {0: tokenizer.del_id, 1: tokenizer.add_id, 2: tokenizer.keep_id}
for i, (line, label) in enumerate(zip(lines, labels)):
if i == example.start_id:
source_ids.append(tokenizer.start_id)
if label != -100:
source_ids.append(id_dict[label])
source_ids.extend(line)
if i == example.end_id:
source_ids.append(tokenizer.end_id)
target_ids.append(tokenizer.msg_id)
target_ids.extend(example.msg)
assert len(source_ids) <= args.max_source_length, f"Too long inputs: {len(source_ids)}."
source_ids, target_ids = self.pad_assert(source_ids, target_ids, args, tokenizer)
return ReviewFeatures(example.idx, source_ids, input_labels, target_ids, type="genmsg")
def daemsg_example(self, item):
example, tokenizer, args = item
input_labels = [-100] * args.max_source_length
source_ids, target_ids = [], []
msg_ids = cp(example.msg)
masks = [random.random() < 0.20 for _ in range(len(msg_ids))]
if sum(masks) == 0:
idx = random.choice(range(len(msg_ids)))
masks[idx] = True
source_ids, target_ids = [], []
i = 0
SPECIAL_ID = 0
while i < len(masks):
j = i
while j < len(masks) and not masks[j]:
source_ids.append(msg_ids[j])
j += 1
if j == len(masks):
break
source_ids.append(tokenizer.special_dict[f"<e{SPECIAL_ID}>"])
target_ids.append(tokenizer.special_dict[f"<e{SPECIAL_ID}>"])
while j < len(masks) and masks[j]:
target_ids.append(msg_ids[j])
j += 1
if SPECIAL_ID < 99: # only 0-99 ids in vocab
SPECIAL_ID += 1
i = j
source_ids, target_ids = self.pad_assert(source_ids, target_ids, args, tokenizer)
return ReviewFeatures(example.idx, source_ids, input_labels, target_ids, type="daemsg")
def pad_assert(self, source_ids, target_ids, args, tokenizer):
source_ids = source_ids[:args.max_source_length - 2]
source_ids = [tokenizer.bos_id] + source_ids + [tokenizer.eos_id]
pad_len = args.max_source_length - len(source_ids)
source_ids += [tokenizer.pad_id] * pad_len
target_ids = target_ids[:args.max_target_length - 1]
target_ids = target_ids + [tokenizer.eos_id]
pad_len = args.max_target_length - len(target_ids)
target_ids += [tokenizer.pad_id] * pad_len
assert len(source_ids) == args.max_source_length, "Not equal length."
assert len(target_ids) == args.max_target_length, "Not equal length."
return source_ids, target_ids
def encode_remove(self, tokenizer, text, args):
text = tokenizer.encode(text, max_length=args.max_source_length, truncation=True)
if type(tokenizer) == T5Tokenizer:
return text[:-1]
elif type(tokenizer) == RobertaTokenizer:
return text[1:-1]
elif type(tokenizer) == MyTokenizer:
return text
else:
raise NotImplementedError
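# ---------------------------------------------------------------------------
# Summary sketch (restating the logic above, not part of the original file):
# TextDataset mixes four pre-training objectives per example:
#   - if the example has a review message, it is up-sampled 3x into a random
#     mix of "genmsg" (generate the message from the tagged diff) and
#     "daemsg" (denoise a partially masked message);
#   - otherwise it becomes either a "label" encoder example (predict
#     add/del/keep tags at masked diff positions) or a "line" decoder example
#     (infill randomly masked diff lines via the <e0>..<e99> sentinels).
# ---------------------------------------------------------------------------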
class CommentGenDataset(TextDataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.tokenizer = tokenizer
if isinstance(tokenizer, MyTokenizer):
tokenizer_type = "mytok"
elif isinstance(tokenizer, T5Tokenizer):
tokenizer_type = ""
elif isinstance(tokenizer, RobertaTokenizer):
tokenizer_type = "rb"
else:
tokenizer_type = "unk"
savep = file_path.replace(".jsonl", tokenizer_type + ".exps")
if os.path.exists(savep):
logger.info("Loading examples from {}".format(savep))
examples = torch.load(savep)
else:
logger.info("Reading examples from {}".format(file_path))
examples = read_review_examples(file_path, samplenum, tokenizer)
# for i in range(len(examples)):
# examples[i].msg = " ".join(nltk.word_tokenize(examples[i].msg))
logger.info(f"Tokenize examples: {file_path}")
examples = pool.map(self.tokenize, \
[(example, tokenizer, args) for example in examples])
torch.save(examples, savep)
logger.info("Convert examples to features...")
self.set_start_end_ids(examples)
self.feats = pool.map(self.convert_examples_to_features, \
[(example, tokenizer, args) for example in examples])
self.feats = [feat for feat in self.feats if feat is not None]
def convert_examples_to_features(self, item):
example, tokenizer, args = item
if len(example.msg) == 0:
return None
return self.genmsg_example(item)
class CommentClsDataset(TextDataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.tokenizer = tokenizer
if isinstance(tokenizer, MyTokenizer):
tokenizer_type = "mytok"
elif isinstance(tokenizer, T5Tokenizer):
tokenizer_type = ""
elif isinstance(tokenizer, RobertaTokenizer):
tokenizer_type = "rb"
else:
tokenizer_type = "unk"
savep = file_path.replace(".jsonl", tokenizer_type + ".exps")
if os.path.exists(savep):
logger.info("Loading examples from {}".format(savep))
examples = torch.load(savep)
else:
logger.info("Reading examples from {}".format(file_path))
examples = read_review_examples(file_path, samplenum, tokenizer)
logger.info(f"Tokenize examples: {file_path}")
examples = pool.map(self.tokenize, \
[(example, tokenizer, args) for example in examples])
torch.save(examples, savep)
logger.info("Convert examples to features...")
self.set_start_end_ids(examples)
self.feats = pool.map(self.convert_examples_to_features, \
[(example, tokenizer, args) for example in examples])
def convert_examples_to_features(self, item):
example, tokenizer, args = item
tmpfeature = self.genmsg_example(item)
return ClsFeatures(tmpfeature.example_id, tmpfeature.source_ids, example.y)
class SimpleClsDataset(TextDataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.tokenizer = tokenizer
if isinstance(tokenizer, MyTokenizer):
tokenizer_type = "mytok"
elif isinstance(tokenizer, T5Tokenizer):
tokenizer_type = ""
elif isinstance(tokenizer, RobertaTokenizer):
tokenizer_type = "rb"
else:
tokenizer_type = "unk"
savep = file_path.replace(".jsonl", tokenizer_type + ".simpexps")
if os.path.exists(savep):
logger.info("Loading examples from {}".format(savep))
self.feats = torch.load(savep)
else:
logger.info("Reading examples from {}".format(file_path))
examples = read_review_examples(file_path, samplenum, tokenizer)
logger.info(f"Tokenize examples: {file_path}")
self.feats = pool.map(self.convert_examples_to_features, \
[(example, tokenizer, args) for example in examples])
torch.save(self.feats, savep)
def convert_examples_to_features(self, item):
example, tokenizer, args = item
example.input_lines = example.input.split("<e0>")
labels_l = len(example.labels)
example.input_lines = example.input_lines[:labels_l]
for i in range(len(example.input_lines)):
if example.labels[i] == 1:
example.input_lines[i] = "+ " + example.input_lines[i]
elif example.labels[i] == 0:
example.input_lines[i] = "- " + example.input_lines[i]
example.input = " ".join(example.input_lines)
input_ids = self.encode_remove(tokenizer, example.input, args)
exceed_l = len(input_ids) - args.max_source_length + 2
if exceed_l > 0:
halfexl = (exceed_l + 1) // 2
input_ids = input_ids[halfexl:-halfexl]
source_ids = input_ids[:args.max_source_length - 2]
source_ids = [tokenizer.bos_id] + source_ids + [tokenizer.eos_id]
pad_len = args.max_source_length - len(source_ids)
source_ids += [tokenizer.pad_id] * pad_len
example_id = example.idx
y = example.y
return ClsFeatures(example_id, source_ids, y)
class SimpleGenDataset(TextDataset):
def __init__(self, tokenizer, pool, args, file_path, samplenum=-1):
self.tokenizer = tokenizer
if isinstance(tokenizer, MyTokenizer):
tokenizer_type = "mytok"
elif isinstance(tokenizer, T5Tokenizer):
tokenizer_type = ""
elif isinstance(tokenizer, RobertaTokenizer):
tokenizer_type = "rb"
else:
tokenizer_type = "unk"
savep = file_path.replace(".jsonl", tokenizer_type + ".simpgenexps")
if os.path.exists(savep):
logger.info("Loading examples from {}".format(savep))
self.feats = torch.load(savep)
else:
logger.info("Reading examples from {}".format(file_path))
data = read_jsonl(file_path)
# data = [dic for dic in data if len(dic["patch"].split("\n")) <= 20]
for i in range(len(data)):
data[i]["idx"] = i
logger.info(f"Tokenize examples: {file_path}")
# self.feats = pool.map(self.convert_examples_to_features, \
# [(dic, tokenizer, args) for dic in data])
self.feats = [self.convert_examples_to_features((dic, tokenizer, args)) for dic in data]
torch.save(self.feats, savep)
def convert_examples_to_features(self, item):
dic, tokenizer, args = item
diff, msg = dic["patch"], dic["msg"]
difflines = diff.split("\n")[1:] # remove start @@
difflines = [line for line in difflines if len(line.strip()) > 0]
map_dic = {"-": 0, "+": 1, " ": 2}
def f(s):
if s in map_dic:
return map_dic[s]
else:
return 2
labels = [f(line[0]) for line in difflines]
difflines = [line[1:].strip() for line in difflines]
inputstr = ""
for label, line in zip(labels, difflines):
if label == 1:
inputstr += "<add>" + line
elif label == 0:
inputstr += "<del>" + line
else:
inputstr += "<keep>" + line
source_ids = self.encode_remove(tokenizer, inputstr, args)
target_ids = []
target_ids.append(tokenizer.msg_id)
msg = self.encode_remove(tokenizer, dic["msg"], args)
target_ids.extend(msg)
source_ids, target_ids = self.pad_assert(source_ids, target_ids, args, tokenizer)
input_labels = [-100] * len(source_ids)
return ReviewFeatures(dic["idx"], source_ids, input_labels, target_ids, type="genmsg")
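# ---------------------------------------------------------------------------
# Worked example (hand-made patch, an assumption about the jsonl schema):
# SimpleGenDataset drops the "@@ ... @@" hunk header, tags every remaining
# diff line, and asks the model to emit the review message after msg_id.
#
#   patch = "@@ -1,2 +1,2 @@\n-return a+b\n+return a - b\n context"
#   =>  input  "<del>return a+b<add>return a - b<keep>context"
#       target [msg_id] + BPE(msg)
# ---------------------------------------------------------------------------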
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self, example_id, source_ids, target_ids, url=None):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
self.url = url
class ReviewFeatures(object):
def __init__(self, example_id, source_ids, source_labels, target_ids, type):
self.example_id = example_id
self.source_ids = source_ids
self.source_labels = source_labels
self.target_ids = target_ids
assert type in ("label", "line", "genmsg", "daemsg")
self.type = type
class ClsFeatures(object):
def __init__(self, example_id, source_ids, y):
self.example_id = example_id
self.source_ids = source_ids
self.y = y
class ReviewExample(object):
"""A single training/test example."""
def __init__(
self, idx, oldf, diff, msg, cmtid, max_len, y
):
self.idx = idx # idx is useless yet
self.oldf = oldf
self.diff = diff
self.msg = msg
self.cmtid = cmtid
self.max_len = max_len
self.y = y
self.prevlines = []
self.afterlines = []
self.lines = []
self.labels = []
self.avail = False
self.input = ""
self.align_and_clean()
self.postprocess()
def postprocess(self):
if not self.avail:
return
# Warning: lines is not self.lines
# lines for rough length estimation
lines = [source_str.split() for source_str in self.lines]
inputl = len(lines) # line tag
inputl += sum(map(len, lines))
left, right = 0, len(lines)
while inputl > self.max_len:
if left % 2 == 0:
inputl -= len(lines[left]) + 1
left += 1
else:
right -= 1
inputl -= len(lines[right]) + 1
lines = lines[left:right]
self.lines = self.lines[left:right]
self.labels = self.labels[left:right]
prevlines = self.prevlines
afterlines = self.afterlines
prev_after_len = max(len(prevlines), len(afterlines))
i = 0
while inputl < self.max_len and i < prev_after_len:
if i < len(prevlines):
newl = inputl + len(prevlines[-1-i].split()) + 1
if newl > self.max_len:
break
self.lines.insert(0, prevlines[-1-i])
self.labels.insert(0, -100)
inputl = newl # tag
if i < len(afterlines):
newl = inputl + len(afterlines[i].split()) + 1
if newl > self.max_len:
break
self.lines.append(afterlines[i])
self.labels.append(-100)
inputl = newl # tag
i += 1
assert inputl <= self.max_len, "Too long inputs."
assert len(self.lines) == len(self.labels), "Not equal length."
self.input = "<e0>".join(self.lines)
self.prevlines, self.lines, self.afterlines = [], [], []
def remove_space_clean(self, line):
"""
        Remove leading and trailing whitespace characters.
"""
rep = " \t\r"
totallen = len(line)
i = 0
while i < totallen and line[i] in rep:
i += 1
j = totallen - 1
while j >= 0 and line[j] in rep:
j -= 1
line = line[i : j + 1]
return line
def align_and_clean(self):
oldflines = self.oldf.split("\n")
difflines = self.diff.split("\n")
first_line = difflines[0]
difflines = difflines[1:]
difflines = [line for line in difflines if line != r"\ No newline at end of file"]
regex = r"@@ -(\d+),(\d+) \+(\d+),(\d+) @@"
matchres = re.match(regex, first_line)
if matchres:
startline, rangelen, startpos, endpos = matchres.groups()
self.avail = True
else:
self.avail = False
return
startline, rangelen = int(startline) - 1, int(rangelen)
endline = startline + rangelen
self.prevlines = oldflines[:startline]
self.afterlines = oldflines[endline:]
for line in difflines:
if line.startswith("-"):
self.lines.append(line[1:])
self.labels.append(0)
elif line.startswith("+"):
self.lines.append(line[1:])
self.labels.append(1)
else:
self.lines.append(line)
self.labels.append(2)
self.prevlines = [self.remove_space_clean(line) for line in self.prevlines]
self.afterlines = [self.remove_space_clean(line) for line in self.afterlines]
self.lines = [self.remove_space_clean(line) for line in self.lines]
self.msg = self.remove_space_clean(self.msg)
self.prevlines = [line for line in self.prevlines if len(line) > 0]
self.afterlines = [line for line in self.afterlines if len(line) > 0]
# print("\n".join(self.prevlines))
# print("\n\n\n\n")
# print("\n".join(self.lines))
# print("\n\n\n\n")
# print("\n".join(self.afterlines))
# print("\n\n\n\n")
assert len(self.lines) == len(self.labels), "Not equal length in align."
topack = list(
zip(
*[
(line, label)
for line, label in zip(self.lines, self.labels)
if len(line) > 0
]
)
)
if topack == []:
self.avail = False
return
else:
self.lines, self.labels = topack
# tuple->list, convenient for later operation
self.lines = list(self.lines)
self.labels = list(self.labels)
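# ---------------------------------------------------------------------------
# Worked example (hand-made hunk header, an assumption): align_and_clean
# parses the unified-diff header to locate the hunk inside the old file.
#
#   first_line = "@@ -10,7 +10,8 @@"
#   => startline = 9 (0-based), rangelen = 7, endline = 16
#      prevlines  = oldf lines [0:9]   (context before the hunk)
#      afterlines = oldf lines [16:]   (context after the hunk)
# Diff body lines are then labeled 0 (-), 1 (+) or 2 (context), and
# postprocess() packs as much surrounding context as fits into max_len.
# ---------------------------------------------------------------------------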
def read_review_examples(filename, data_num=-1, tokenizer=None):
"""Read examples from filename."""
examples = []
idx = 0
with open(filename) as f:
for line in f:
try:
js = json.loads(line.strip())
            except Exception:
                print("Error while reading json data.")
continue
maxl = 200
if "y" not in js:
js["y"] = 0
if "msg" in js and len(js["msg"]) > 0:
js["y"] = 1
example = ReviewExample(
idx=idx,
oldf=js["oldf"],
diff=js["patch"],
msg=js["msg"] if "msg" in js else "",
cmtid=js["cmtid"] if "cmtid" in js else "",
max_len=maxl,
y=js["y"]
)
if example.avail:
examples.append(example)
idx += 1
if idx == data_num:
break
else:
# print(f"Passing {idx} because of invalid diff.")
idx += 1
if idx == data_num:
break
return examples
def read_jsonl(path):
data = []
with open(path) as f:
for line in f:
try:
js = json.loads(line.strip())
            except Exception:
                print("Error while reading json data.")
continue
data.append(js)
return data
|
CodeBERT/CodeReviewer/code/utils.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/utils.py",
"repo_id": "CodeBERT",
"token_count": 15950
}
| 214 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self, encoder):
super(Model, self).__init__()
self.encoder = encoder
def forward(self, code_inputs=None, attn_mask=None,position_idx=None, nl_inputs=None):
if code_inputs is not None:
nodes_mask=position_idx.eq(0)
token_mask=position_idx.ge(2)
inputs_embeddings=self.encoder.embeddings.word_embeddings(code_inputs)
nodes_to_token_mask=nodes_mask[:,:,None]&token_mask[:,None,:]&attn_mask
nodes_to_token_mask=nodes_to_token_mask/(nodes_to_token_mask.sum(-1)+1e-10)[:,:,None]
avg_embeddings=torch.einsum("abc,acd->abd",nodes_to_token_mask,inputs_embeddings)
inputs_embeddings=inputs_embeddings*(~nodes_mask)[:,:,None]+avg_embeddings*nodes_mask[:,:,None]
return self.encoder(inputs_embeds=inputs_embeddings,attention_mask=attn_mask,position_ids=position_idx)[1]
else:
return self.encoder(nl_inputs,attention_mask=nl_inputs.ne(1))[1]
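# ---------------------------------------------------------------------------
# Note (sketch, not part of the original file): following the masks above,
# data-flow graph nodes occupy the positions with position_idx == 0 and real
# code tokens the positions with position_idx >= 2 (the remaining index is
# presumably padding). Each node's input embedding is replaced by the average
# embedding of the code tokens it is linked to in attn_mask before the
# encoder runs, and the pooled representation (index [1]) is returned, e.g.
#
#   code_vec = model(code_inputs=code_ids, attn_mask=attn_mask, position_idx=position_idx)
#   nl_vec   = model(nl_inputs=nl_ids)
#   sim      = (code_vec * nl_vec).sum(-1)   # e.g. a dot-product score for retrieval
# ---------------------------------------------------------------------------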
|
CodeBERT/GraphCodeBERT/codesearch/model.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/codesearch/model.py",
"repo_id": "CodeBERT",
"token_count": 568
}
| 215 |
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LongCoder model."""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN, gelu
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers import LongformerConfig
logger = logging.get_logger(__name__)
@dataclass
class LongcoderBaseModelOutputWithPooling(ModelOutput):
"""
    Base class for Longcoder's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class LongcoderBaseModelOutput(ModelOutput):
"""
Base class for Longcoder's outputs, with potential hidden states, local and global attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
"""
Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
True` else after `sep_token_id`.
"""
question_end_index = _get_question_end_index(input_ids, sep_token_id)
question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1
# bool attention mask with True in locations of global attention
attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
if before_sep_token is True:
attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8)
else:
# last token is separation token and should not be counted and in the middle are two separation tokens
attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * (
attention_mask.expand_as(input_ids) < input_ids.shape[-1]
).to(torch.uint8)
return attention_mask
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids: torch.Tensor
    Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
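# ---------------------------------------------------------------------------
# Worked example (hand-made tensor, an assumption): with padding_idx = 1,
#
#   input_ids     = [[5, 7, 9, 1, 1]]
#   mask          = [[1, 1, 1, 0, 0]]
#   cumsum * mask = [[1, 2, 3, 0, 0]]
#   position_ids  = [[2, 3, 4, 1, 1]]   # padded positions keep padding_idx
# ---------------------------------------------------------------------------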
class LongcoderEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length = 0):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_ids = position_ids.clamp(max=self.config.max_position_embeddings-1)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
        Args:
            inputs_embeds: torch.Tensor
        Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
class LongcoderSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
# separate projection layers for tokens with global attention
self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
self.config = config
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
past_key_value=None,
idx=None,
bridge_hidden_states=None,
bridge_position=None,
):
"""
        [`LongcoderSelfAttention`] expects *len(hidden_states)* to be a multiple of *attention_window*. Padding to
*attention_window* happens in [`LongcoderModel.forward`] to avoid redoing the padding on each layer.
The *attention_mask* is changed in [`LongcoderModel.forward`] from 0, 1, 2 to:
- -10000: no attention
- 0: local attention
- +10000: global attention
"""
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
# normalize query
query_vectors /= math.sqrt(self.head_dim)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
if past_key_value is not None:
# Inference
# ToDo: update past_key_value
key_vectors = torch.cat([past_key_value[0], key_vectors], dim=1)
value_vectors = torch.cat([past_key_value[1], value_vectors], dim=1)
key_position = torch.arange(key_vectors.size(1),device=key_vectors.device)[None,:].repeat(batch_size,1)
query_position = key_position[:,-query_vectors.size(1):]
if len(past_key_value) != 2:
key_vectors = torch.cat([past_key_value[2],key_vectors],dim=1)
value_vectors = torch.cat([past_key_value[3],value_vectors],dim=1)
is_index_global_attn = torch.cat([past_key_value[4].ne(-1),is_index_global_attn],dim=1)
key_position = torch.cat([past_key_value[4],key_position],dim=1)
attention_scores = torch.matmul(query_vectors.transpose(1,2), key_vectors.transpose(1,2).transpose(-1, -2))
attention_mask = query_position[:,:,None] - key_position[:,None,:]
attention_mask = (attention_mask>=0)&((attention_mask<=self.one_sided_attn_window_size*2) | is_index_global_attn[:,None,:])
attention_mask = ~attention_mask
attention_mask = attention_mask.type_as(query_vectors).masked_fill(
attention_mask, torch.finfo(query_vectors.dtype).min
)
attention_scores = attention_scores + attention_mask[:,None,:,:]
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attn_output = torch.matmul(attention_probs, value_vectors.transpose(1,2))
attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
new_attn_output_shape = attn_output.size()[:-2] + (self.embed_dim,)
attn_output_local = attn_output.view(new_attn_output_shape )
attn_output = attn_output_local
outputs = (attn_output,)
else:
past_key_value = [key_vectors, value_vectors]
key_vectors = torch.cat((key_vectors[:,-self.one_sided_attn_window_size:],key_vectors[:,:-self.one_sided_attn_window_size]),1)
value_vectors = torch.cat((value_vectors[:,-self.one_sided_attn_window_size:],value_vectors[:,:-self.one_sided_attn_window_size]),1)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# values to pad for attention probs
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
if self.config.is_decoder_only:
remove_from_windowed_attention_mask = torch.cat((remove_from_windowed_attention_mask[:,-self.one_sided_attn_window_size:],remove_from_windowed_attention_mask[:,:-self.one_sided_attn_window_size]),1)
# cast to fp32/fp16 then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min
)
# diagonal mask with zeros everywhere and -inf inplace of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], (
f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
)
            # compute local attention probs from global attention keys and concat over window dim
if is_global_attn:
# compute global attn indices required through out forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn)
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=past_key_value[0],
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to local_attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
position = torch.arange(seq_len,device=hidden_states.device)[None,:].repeat(batch_size,1)
key_position = position.new_zeros(batch_size, max_num_global_attn_indices) + 100000
key_position[is_local_index_global_attn_nonzero] = position[is_index_global_attn_nonzero]
query_position = position
mask = (query_position[:,:,None]-key_position[:,None,:]) < 0
float_mask = mask.type_as(query_vectors).masked_fill(mask, torch.finfo(query_vectors.dtype).min)
attn_scores = torch.cat((global_key_attn_scores + float_mask[:,:,None,:], attn_scores), dim=-1)
bridge_hidden_states = bridge_hidden_states.transpose(0, 1)
# normalize query
key_vectors_bridge = self.key(bridge_hidden_states)
value_vectors_bridge = self.value(bridge_hidden_states)
key_vectors_bridge = key_vectors_bridge.view(-1, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
value_vectors_bridge = value_vectors_bridge.view(-1,batch_size, self.num_heads, self.head_dim).transpose(0, 1)
past_key_value.extend([key_vectors_bridge,value_vectors_bridge,bridge_position])
query_position = torch.arange(seq_len,device=hidden_states.device)[None,:].repeat(batch_size,1)
key_position = bridge_position
mask = (query_position[:,:,None]-key_position[:,None,:]) < 0
float_mask = mask.type_as(query_vectors).masked_fill(mask, torch.finfo(query_vectors.dtype).min)
bridge_key_attn_scores = torch.matmul(query_vectors.transpose(1,2), key_vectors_bridge.transpose(1,2).transpose(-1, -2)).transpose(1,2)
attn_scores = torch.cat((bridge_key_attn_scores + float_mask[:,:,None,:], attn_scores), dim=-1)
# free memory
del bridge_key_attn_scores
attn_probs = nn.functional.softmax(
attn_scores, dim=-1, dtype=torch.float32
)
# use fp32 for numerical stability
#if idx==0:
# print(key_vectors[0,2048,0])
#print(attn_probs[0,2048,0])
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
attn_probs = attn_probs.type_as(attn_scores)
# free memory
del attn_scores
# apply dropout
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=past_key_value[1],
value_vectors_global=past_key_value[3],
value_vectors_shift=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
# project hidden states
query_vectors_bridge = self.query_global(bridge_hidden_states)
# normalize query
query_vectors_bridge /= math.sqrt(self.head_dim)
query_vectors_bridge = query_vectors_bridge.view(-1, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = self.key_global(torch.cat((hidden_states,bridge_hidden_states),0))
value_vectors = self.value_global(torch.cat((hidden_states,bridge_hidden_states),0))
key_vectors = key_vectors.view(-1, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
value_vectors = value_vectors.view(-1,batch_size, self.num_heads, self.head_dim).transpose(0, 1)
query_position = bridge_position
key_position = torch.cat((torch.arange(seq_len,device=hidden_states.device)[None,:].repeat(batch_size,1),bridge_position),-1)
is_index_bridge_attn_tmp = torch.cat((torch.arange(seq_len,device=hidden_states.device)[None,:].repeat(batch_size,1).eq(-1),bridge_position.ne(-1)),-1)
attention_mask = query_position[:,:,None] - key_position[:,None,:]
attention_mask = (attention_mask>=0)&((attention_mask<=256) | is_index_bridge_attn_tmp[:,None,:])
attention_mask = ~attention_mask
float_mask = attention_mask.type_as(query_vectors).masked_fill(
attention_mask, torch.finfo(query_vectors.dtype).min
)
bridge_key_attn_scores = torch.matmul(query_vectors_bridge.transpose(1,2), key_vectors.transpose(1,2).transpose(-1, -2)).transpose(1,2)
attn_probs = nn.functional.softmax(
bridge_key_attn_scores + float_mask[:,:,None,:], dim=-1, dtype=torch.float32
)
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
attn_output_2 = torch.matmul(attn_probs.transpose(1, 2), value_vectors.transpose(1,2)).transpose(1, 2)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
if bridge_hidden_states is not None:
attn_output_2 = attn_output_2.transpose(0, 1).reshape(-1, batch_size, embed_dim).contiguous()
outputs = ([attn_output.transpose(0, 1),attn_output_2.transpose(0,1)],)
else:
outputs = (attn_output.transpose(0, 1),)
if output_attentions:
outputs += (attn_probs,)
if self.config.is_decoder_only:
outputs += (past_key_value,)
return outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = nn.functional.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example:
```python
chunked_hidden_states: [
0.4983,
2.6918,
-0.0071,
1.0492,
-1.8348,
0.7672,
0.2986,
0.0285,
-0.7584,
0.4206,
-0.0405,
0.1599,
2.0514,
-1.1600,
0.5372,
0.2629,
]
window_overlap = num_rows = 4
```
                     (pad & diagonalize) => [ 0.4983,  2.6918, -0.0071,  1.0492,  0.0000,  0.0000,  0.0000
                                              0.0000, -1.8348,  0.7672,  0.2986,  0.0285,  0.0000,  0.0000
                                              0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599,  0.0000
                                              0.0000,  0.0000,  0.0000,  2.0514, -1.1600,  0.5372,  0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = nn.functional.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap*window_overlap
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap, onnx_export=False):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
if not onnx_export:
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
torch.div(hidden_states.size(1), (window_overlap * 2), rounding_mode="trunc"),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
# When exporting to ONNX, use this separate logic
if hidden_states.size(1) == window_overlap * 2:
# simplest case
return hidden_states.unsqueeze(1)
else:
# have to use slow implementation since as_strided, unfold and 2d-tensor indexing aren't supported (yet) in ONNX export
# TODO replace this with
# > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).transpose(2, 3)
# once `unfold` is supported
chunk_size = [
hidden_states.size(0),
hidden_states.size(1) // window_overlap - 1,
window_overlap * 2,
hidden_states.size(2),
]
overlapping_chunks = torch.empty(chunk_size)
for chunk in range(chunk_size[1]):
overlapping_chunks[:, chunk, :, :] = hidden_states[
:, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, :
]
return overlapping_chunks
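    # Illustrative note (added commentary, not from the original file): for an input of shape
    # (batch_size * num_heads, seq_len, head_dim) with seq_len = 1024 and window_overlap = 256,
    # the non-ONNX path above returns chunks of size 2 * 256 = 512 that overlap by 256 tokens,
    # i.e. a tensor of shape (batch_size * num_heads, 3, 512, head_dim).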
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] = torch.full_like(
beginning_input, -float("inf")
).where(beginning_mask.bool(), beginning_input)
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = torch.full_like(
ending_input, -float("inf")
).where(ending_mask.bool(), ending_input)
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
Matrix multiplication of query and key tensors using with a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longcoder) with an
overlap of size window_overlap
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
query = self._chunk(query, window_overlap, self.config.__dict__.get("onnx_export", False))
key = self._chunk(key, window_overlap, self.config.__dict__.get("onnx_export", False))
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""
        Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. The returned tensor has the
        same shape as `value`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads,
torch.div(seq_len, window_overlap, rounding_mode="trunc"),
window_overlap,
2 * window_overlap + 1,
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
# pad seq_len with w at the beginning of the sequence and another window overlap at the end
padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
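    # Added commentary (not from the original file): the returned context tensor has shape
    # (batch_size, seq_len, num_heads, head_dim), i.e. the same layout as `value`, so it can be
    # summed directly with the global-attention output in its callers.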
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
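    # Illustrative note (added commentary, not from the original file): for a batch of two
    # sequences with 1 and 3 globally-attended tokens respectively, max_num_global_attn_indices
    # is 3, is_index_global_attn_nonzero gives the (batch, position) pairs of those tokens, and
    # is_local_index_no_global_attn_nonzero marks the two unused padding slots in the first
    # sequence's global-attention buffer.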
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
# need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
] = torch.finfo(attn_probs_from_global_key.dtype).min
attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value_vectors,
value_vectors_global,
value_vectors_shift,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices+value_vectors_global.size(1))
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
value_vectors_only_global = torch.cat((value_vectors_global,value_vectors_only_global),1)
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone()
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices+value_vectors_global.size(1), attn_probs.size(-1) - max_num_global_attn_indices-value_vectors_global.size(1)
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors_shift, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
hidden_states,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
attention_mask
):
seq_len, batch_size = hidden_states.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
past_key_value = [global_key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1),
global_value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)]
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
) # batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
seq_len,
], (
"global_attn_scores have the wrong size. Size should be"
f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
f" {global_attn_scores.size()}."
)
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
# need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
] = torch.finfo(global_attn_scores.dtype).min
global_attn_scores = global_attn_scores.transpose(1, 2)
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
torch.finfo(global_attn_scores.dtype).min,
)
global_attn_scores += attention_mask[:,None,:,:]
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = nn.functional.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
# apply layer head masking
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs_float = global_attn_probs_float.view(
batch_size * self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs = nn.functional.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
self.head_dim,
], (
"global_attn_output tensor has the wrong size. Size should be"
f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
f" {global_attn_output.size()}."
)
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs,past_key_value
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongcoderSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongcoderAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.self = LongcoderSelfAttention(config, layer_id)
self.output = LongcoderSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
past_key_value=None,
idx=None,
bridge_hidden_states=None,
bridge_position=None
):
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
past_key_value=past_key_value,
idx=idx,
bridge_hidden_states=bridge_hidden_states,
bridge_position=bridge_position
)
attn_output = self.output(self_outputs[0][0], hidden_states)
if bridge_hidden_states is not None:
bridge_attn_output = self.output(self_outputs[0][1], bridge_hidden_states)
attn_output = (attn_output,bridge_attn_output)
outputs = (attn_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongcoderIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongcoderOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongcoderLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = LongcoderAttention(config, layer_id)
self.intermediate = LongcoderIntermediate(config)
self.output = LongcoderOutput(config)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
past_key_value=None,
idx=None,
bridge_hidden_states=None,
bridge_position=None,
):
self_attn_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
past_key_value=past_key_value,
idx=idx,
bridge_hidden_states=bridge_hidden_states,
bridge_position=bridge_position,
)
attn_output = self_attn_outputs[0]
outputs = self_attn_outputs[1:]
if bridge_hidden_states is not None:
layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output[0]
)
bridge_layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim,attn_output[1]
)
layer_output = (layer_output,bridge_layer_output)
else:
layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
)
outputs = (layer_output,) + outputs
return outputs
def ff_chunk(self, attn_output):
intermediate_output = self.intermediate(attn_output)
layer_output = self.output(intermediate_output, attn_output)
return layer_output
class LongcoderEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LongcoderLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
padding_len=0,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
past_key_values=None,
bridge_inputs=None,
bridge_position=None
):
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None # All local attentions.
all_global_attentions = () if (output_attentions and is_global_attn) else None
next_decoder_cache = [] if self.config.is_decoder_only else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layer)
), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}."
for idx, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, is_global_attn, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
is_index_masked,
is_index_global_attn,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=head_mask[idx] if head_mask is not None else None,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
past_key_value=past_key_value,
idx=idx,
bridge_hidden_states = bridge_inputs,
bridge_position = bridge_position,
)
if bridge_inputs is not None:
hidden_states,bridge_inputs = layer_outputs[0]
else:
hidden_states = layer_outputs[0]
if self.config.is_decoder_only:
if padding_len > 0:
next_decoder_cache.append([x[:, :-padding_len] if idx<=1 else x for idx, x in enumerate(layer_outputs[-1])])
else:
                    next_decoder_cache.append(layer_outputs[-1])
if output_attentions:
# bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
if is_global_attn:
# bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# undo padding
if padding_len > 0:
# unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
hidden_states = hidden_states[:, :-padding_len]
if output_hidden_states:
all_hidden_states = tuple([state[:, :-padding_len] for state in all_hidden_states])
if output_attentions:
all_attentions = tuple([state[:, :, :-padding_len, :] for state in all_attentions])
if not return_dict:
return tuple(
v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
)
return LongcoderBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
global_attentions=all_global_attentions,
past_key_values=next_decoder_cache,
)
class LongcoderPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LongformerConfig
base_model_prefix = "Longcoder"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_unexpected = [r"position_ids"]
_no_split_modules = ["LongcoderSelfAttention"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LongcoderEncoder):
module.gradient_checkpointing = value
LONGCODER_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LongformerConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LONGCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`LongcoderTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to decide the attention given to each token: local attention or global attention. Tokens with global
            attention attend to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the [Longcoder paper](https://arxiv.org/abs/2004.05150) for more
details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Longcoder Model outputting raw hidden-states without any specific head on top.",
LONGCODER_START_DOCSTRING,
)
class LongcoderModel(LongcoderPreTrainedModel):
"""
    This class copies code from [`RobertaModel`] and overrides the standard self-attention with Longcoder self-attention
to provide the ability to process long sequences following the self-attention approach described in [Longcoder:
the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
Longcoder self-attention combines a local (sliding window) and global attention to extend to long documents
without the O(n^2) increase in memory and compute.
The self-attention module `LongcoderSelfAttention` implemented here supports the combination of local and global
attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
    attention are more relevant for autoregressive language modeling than for finetuning on downstream tasks. A future
    release will add support for autoregressive attention, but support for dilated attention requires a custom CUDA
kernel to be memory and compute efficient.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
self.embeddings = LongcoderEmbeddings(config)
self.encoder = LongcoderEncoder(config)
self.pooler = None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longcoder self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if self.config.is_decoder_only and padding_len < attention_window//2:
padding_len += attention_window
if padding_len > 0:
#logger.info(
# f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
# f"`config.attention_window`: {attention_window}"
#)
if input_ids is not None:
input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = nn.functional.pad(
attention_mask, (0, padding_len), value=0
) # no attention on the padding tokens
token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# Longcoder self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
# => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
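    # Illustrative note (added commentary, not from the original file): with
    # attention_mask = [1, 1, 1, 0] and global_attention_mask = [1, 0, 0, 0], the merged mask
    # becomes [2, 1, 1, 0], where 2 marks global attention, 1 local attention, and 0 padding.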
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
@add_start_docstrings_to_model_forward(LONGCODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongcoderBaseModelOutputWithPooling, config_class="LongformerConfig")
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
past_key_values=None
) -> Union[Tuple, LongcoderBaseModelOutputWithPooling]:
r"""
Returns:
Examples:
```python
>>> import torch
>>> from transformers import LongcoderModel, LongcoderTokenizer
>>> model = LongcoderModel.from_pretrained("allenai/Longcoder-base-4096")
>>> tokenizer = LongcoderTokenizer.from_pretrained("allenai/Longcoder-base-4096")
>>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = torch.ones(
... input_ids.shape, dtype=torch.long, device=input_ids.device
... ) # initialize to local attention
>>> global_attention_mask = torch.zeros(
... input_ids.shape, dtype=torch.long, device=input_ids.device
... ) # initialize to global attention to be deactivated for all tokens
>>> global_attention_mask[
... :,
... [
... 1,
... 4,
... 21,
... ],
... ] = 1 # Set global attention to random tokens for the sake of this example
>>> # Usually, set global attention based on the task. For example,
>>> # classification: the <s> token
>>> # QA: question tokens
>>> # LM: potentially on the beginning of sentences and paragraphs
>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
>>> sequence_output = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = past_key_values[0][0].shape[1] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones((input_shape[0] ,input_shape[1]+past_key_values_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
assert input_shape[1]+past_key_values_length <= self.config.max_position_embeddings-2
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
if past_key_values is None:
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
else:
padding_len = 0
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[
:, 0, 0, :
]
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length = past_key_values_length
)
if past_key_values is None:
position = torch.arange(embedding_output.size(1),device=embedding_output.device)[None,:].repeat(embedding_output.size(0),1)
bridge_token_mask = (position + 1) % 256 == 0
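            # Added commentary (not from the original file): every 256th position is selected as a
            # "bridge" token; embeddings for these slots (token id 0 at the corresponding position)
            # are built below and passed to the encoder as `bridge_inputs`.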
(
max_num_bridge_attn_indices,
is_index_bridge_attn_nonzero,
is_local_index_bridge_attn_nonzero,
is_local_index_no_bridge_attn_nonzero,
) = self._get_global_attn_indices(bridge_token_mask)
batch_size = embedding_output.size(0)
bridge_position = position.new_zeros(batch_size, max_num_bridge_attn_indices) + 100000
bridge_position[is_local_index_bridge_attn_nonzero] = position[is_index_bridge_attn_nonzero]
bridge_inputs = embedding_output.new_zeros(batch_size, max_num_bridge_attn_indices,embedding_output.size(-1))
bridge_inputs[is_local_index_bridge_attn_nonzero] = self.embeddings(input_ids=bridge_position*0, position_ids=bridge_position, past_key_values_length = 0)[is_local_index_bridge_attn_nonzero]
else:
bridge_position = None
bridge_inputs = None
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
padding_len=padding_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
past_key_values=past_key_values,
bridge_inputs=bridge_inputs,
bridge_position=bridge_position,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return LongcoderBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
global_attentions=encoder_outputs.global_attentions,
past_key_values=encoder_outputs.past_key_values
)
|
CodeBERT/LongCoder/longcoder.py/0
|
{
"file_path": "CodeBERT/LongCoder/longcoder.py",
"repo_id": "CodeBERT",
"token_count": 35772
}
| 216 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning an encoder model (e.g. RoBERTa/UniXcoder) for clone detection on BigCloneBench (BCB).
Each example is a pair of code snippets; the pair is predicted to be a clone when the model's
similarity score exceeds 0.5 (see `evaluate` below).
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import json
import numpy as np
import torch
import multiprocessing
from tqdm import tqdm
from sklearn.metrics import recall_score,precision_score,f1_score
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
from model import Model
logger = logging.getLogger(__name__)
cpu_cont = 16
def get_example(item):
url1,url2,label,tokenizer,args,cache,url_to_code = item
if url1 in cache:
code1 = cache[url1].copy()
else:
try:
code = ' '.join(url_to_code[url1].split())
except:
code = ""
code1 = tokenizer.tokenize(code)
if url2 in cache:
code2 = cache[url2].copy()
else:
try:
code = ' '.join(url_to_code[url2].split())
except:
code = ""
code2 = tokenizer.tokenize(code)
return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens,
input_ids,
label,
url1,
url2
):
self.input_tokens = input_tokens
self.input_ids = input_ids
self.label=label
self.url1=url1
self.url2=url2
def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache):
"""convert examples to token ids"""
code1_tokens = code1_tokens[:args.block_size-4]
code1_tokens = [tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+code1_tokens+[tokenizer.sep_token]
code2_tokens = code2_tokens[:args.block_size-4]
code2_tokens = [tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+code2_tokens+[tokenizer.sep_token]
code1_ids = tokenizer.convert_tokens_to_ids(code1_tokens)
padding_length = args.block_size - len(code1_ids)
code1_ids += [tokenizer.pad_token_id]*padding_length
code2_ids = tokenizer.convert_tokens_to_ids(code2_tokens)
padding_length = args.block_size - len(code2_ids)
code2_ids += [tokenizer.pad_token_id]*padding_length
source_tokens = code1_tokens+code2_tokens
source_ids = code1_ids+code2_ids
return InputFeatures(source_tokens,source_ids,label,url1,url2)
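# Illustrative note (added commentary, not from the original file): each snippet is encoded as
# [CLS] <encoder-only> [SEP] <code tokens> [SEP], padded to block_size, and the two padded
# sequences are concatenated, so `input_ids` has length 2 * block_size.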
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path, pool=None):
postfix = file_path.split('/')[-1].split('.txt')[0]
self.examples = []
index_filename = file_path
logger.info("Creating features from index file at %s ", index_filename)
url_to_code = {}
with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
for line in f:
line = line.strip()
js = json.loads(line)
url_to_code[js['idx']] = js['func']
data = []
cache = {}
        with open(index_filename) as f:
for line in f:
line = line.strip()
url1,url2,label = line.split('\t')
if url1 not in url_to_code or url2 not in url_to_code:
continue
if label == '0':
label = 0
else:
label = 1
data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
if 'valid' in postfix:
data = random.sample(data,int(len(data)*0.1))
self.examples = pool.map(get_example,tqdm(data,total=len(data)))
if 'train' in postfix:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("label: {}".format(example.label))
logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label)
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)  # fixed typo; note this only affects child processes, not the running interpreter's hash seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer,pool):
""" Train the model """
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
args.max_steps = args.num_train_epochs * len( train_dataloader)
args.save_steps = args.max_steps // 10
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
num_training_steps=args.max_steps)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size // args.n_gpu )
logger.info(" Total train batch size = %d", args.train_batch_size)
logger.info(" Total optimization steps = %d", args.max_steps)
losses, best_f1 = [], 0
model.zero_grad()
for idx in range(args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
inputs = batch[0].to(args.device)
labels = batch[1].to(args.device)
model.train()
loss,logits = model(inputs,labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
losses.append(loss.item())
if (step+1)% 100==0:
logger.info("epoch {} step {} loss {}".format(idx,step+1,round(np.mean(losses[-100:]),4)))
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if len(losses) % args.save_steps == 0:
results = evaluate(args, model, tokenizer,args.eval_data_file,pool)
for key, value in results.items():
logger.info(" %s = %s", key, round(value,4))
if results['eval_f1'] > best_f1:
best_f1 = results['eval_f1']
logger.info(" "+"*"*20)
logger.info(" Best f1:%s",round(best_f1,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-f1'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def evaluate(args, model, tokenizer, data_file, pool):
""" Evaluate the model """
eval_output_dir = args.output_dir
eval_dataset = TextDataset(tokenizer, args, data_file,pool)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits = []
y_trues = []
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
labels = batch[1].to(args.device)
with torch.no_grad():
lm_loss,cos_sim = model(inputs,labels)
eval_loss += lm_loss.mean().item()
logits.append(cos_sim.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
logits = np.concatenate(logits,0)
y_trues = np.concatenate(y_trues,0)
y_preds = logits>0.5
recall=recall_score(y_trues, y_preds)
precision=precision_score(y_trues, y_preds)
f1=f1_score(y_trues, y_preds)
result = {
"eval_recall": float(recall),
"eval_precision": float(precision),
"eval_f1": float(f1),
}
return result
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--train_data_file", default=None, type=str,
help="The input training data file (a jsonl file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a jsonl file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input test data file to evaluate the perplexity on (a jsonl file).")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
pool = multiprocessing.Pool(cpu_cont)
#print arguments
args = parser.parse_args()
#set log
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO )
#set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
logger.info("device: %s, n_gpu: %s",device, args.n_gpu)
# Set seed
set_seed(args.seed)
#build model
tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path)
config = RobertaConfig.from_pretrained(args.model_name_or_path)
model = RobertaModel.from_pretrained(args.model_name_or_path)
model = Model(model,config,tokenizer,args)
logger.info("Training/evaluation parameters %s", args)
model.to(args.device)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Training
if args.do_train:
train_dataset = TextDataset(tokenizer, args, args.train_data_file, pool=pool)
train(args, train_dataset, model, tokenizer, pool)
# Evaluation
results = {}
if args.do_eval:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
result = evaluate(args, model, tokenizer, args.eval_data_file, pool = pool)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key]*100 if "map" in key else result[key],2)))
if args.do_test:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
result = evaluate(args, model, tokenizer, args.test_data_file, pool = pool)
logger.info("***** Test results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key]*100 if "map" in key else result[key],2)))
if __name__ == "__main__":
main()
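# A minimal sketch of how this script might be launched (the paths and model name are
# illustrative assumptions, not taken from this file; all flags exist in the parser above):
#   python run.py \
#     --output_dir saved_models/clone_detection \
#     --model_name_or_path microsoft/unixcoder-base \
#     --do_train --do_eval \
#     --train_data_file dataset/train.jsonl \
#     --eval_data_file dataset/valid.jsonl \
#     --num_train_epochs 1 --block_size 512 \
#     --train_batch_size 16 --eval_batch_size 32 \
#     --learning_rate 5e-5 --seed 42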
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/run.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/run.py",
"repo_id": "CodeBERT",
"token_count": 7374
}
| 217 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self, encoder):
super(Model, self).__init__()
self.encoder = encoder
def forward(self, code_inputs=None, nl_inputs=None):
if code_inputs is not None:
outputs = self.encoder(code_inputs,attention_mask=code_inputs.ne(1))[0]
outputs = (outputs*code_inputs.ne(1)[:,:,None]).sum(1)/code_inputs.ne(1).sum(-1)[:,None]
return torch.nn.functional.normalize(outputs, p=2, dim=1)
else:
outputs = self.encoder(nl_inputs,attention_mask=nl_inputs.ne(1))[0]
outputs = (outputs*nl_inputs.ne(1)[:,:,None]).sum(1)/nl_inputs.ne(1).sum(-1)[:,None]
return torch.nn.functional.normalize(outputs, p=2, dim=1)
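# A minimal sketch of how these embeddings are typically used for retrieval
# (the variable names below are illustrative assumptions, not part of this file):
#   code_vecs = model(code_inputs=code_batch)      # [num_candidates, hidden], L2-normalized
#   nl_vecs = model(nl_inputs=query_batch)         # [num_queries, hidden], L2-normalized
#   scores = nl_vecs @ code_vecs.T                 # cosine similarities
#   ranking = scores.argsort(dim=-1, descending=True)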
|
CodeBERT/UniXcoder/downstream-tasks/code-search/model.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-search/model.py",
"repo_id": "CodeBERT",
"token_count": 410
}
| 218 |
# coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DeBERTa-v2 model. """
import pdb
import math
from collections.abc import Sequence
import numpy as np
import torch
from torch import _softmax_backward_data, nn
from torch.nn import CrossEntropyLoss, LayerNorm
from transformers.activations import ACT2FN
from transformers.file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaV2Config"
_TOKENIZER_FOR_DOC = "DebertaV2Tokenizer"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/deberta-v2-xlarge",
"microsoft/deberta-v2-xxlarge",
"microsoft/deberta-v2-xlarge-mnli",
"microsoft/deberta-v2-xxlarge-mnli",
]
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = StableDropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
class XSoftmax(torch.autograd.Function):
"""
Masked Softmax which is optimized for saving memory
Args:
input (:obj:`torch.tensor`): The input tensor that will apply softmax.
        mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
dim (int): The dimension that will apply softmax
Example::
>>> import torch
>>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
>>> # Make a tensor
>>> x = torch.randn([4,20,100])
>>> # Create a mask
>>> mask = (x>0).int()
>>> y = XSoftmax.apply(x, mask, dim=-1)
"""
@staticmethod
def forward(self, input, mask, dim):
self.dim = dim
rmask = ~(mask.bool())
output = input.masked_fill(rmask, float("-inf"))
output = torch.softmax(output, self.dim)
output.masked_fill_(rmask, 0)
self.save_for_backward(output)
return output
@staticmethod
def backward(self, grad_output):
(output,) = self.saved_tensors
inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)
return inputGrad, None, None
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
class DropoutContext(object):
def __init__(self):
self.dropout = 0
self.mask = None
self.scale = 1
self.reuse_mask = True
# Copied from transformers.models.deberta.modeling_deberta.get_mask
def get_mask(input, local_context):
if not isinstance(local_context, DropoutContext):
dropout = local_context
mask = None
else:
dropout = local_context.dropout
dropout *= local_context.scale
mask = local_context.mask if local_context.reuse_mask else None
if dropout > 0 and mask is None:
mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()
if isinstance(local_context, DropoutContext):
if local_context.mask is None:
local_context.mask = mask
return mask, dropout
# Copied from transformers.models.deberta.modeling_deberta.XDropout
class XDropout(torch.autograd.Function):
"""Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
@staticmethod
def forward(ctx, input, local_ctx):
mask, dropout = get_mask(input, local_ctx)
ctx.scale = 1.0 / (1 - dropout)
if dropout > 0:
ctx.save_for_backward(mask)
return input.masked_fill(mask, 0) * ctx.scale
else:
return input
@staticmethod
def backward(ctx, grad_output):
if ctx.scale > 1:
(mask,) = ctx.saved_tensors
return grad_output.masked_fill(mask, 0) * ctx.scale, None
else:
return grad_output, None
# Copied from transformers.models.deberta.modeling_deberta.StableDropout
class StableDropout(nn.Module):
"""
Optimized dropout module for stabilizing the training
Args:
        drop_prob (float): the dropout probability
"""
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
self.count = 0
self.context_stack = None
def forward(self, x):
"""
Call the module
Args:
x (:obj:`torch.tensor`): The input tensor to apply dropout
"""
if self.training and self.drop_prob > 0:
return XDropout.apply(x, self.get_context())
return x
def clear_context(self):
self.count = 0
self.context_stack = None
def init_context(self, reuse_mask=True, scale=1):
if self.context_stack is None:
self.context_stack = []
self.count = 0
for c in self.context_stack:
c.reuse_mask = reuse_mask
c.scale = scale
def get_context(self):
if self.context_stack is not None:
if self.count >= len(self.context_stack):
self.context_stack.append(DropoutContext())
ctx = self.context_stack[self.count]
ctx.dropout = self.drop_prob
self.count += 1
return ctx
else:
return self.drop_prob
# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
class DebertaV2SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
class DebertaV2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = DisentangledSelfAttention(config)
self.output = DebertaV2SelfOutput(config)
self.config = config
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
self_output = self.self(
hidden_states,
attention_mask,
return_att,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if return_att:
self_output, att_matrix = self_output
if query_states is None:
query_states = hidden_states
attention_output = self.output(self_output, query_states)
if return_att:
return (attention_output, att_matrix)
else:
return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
class DebertaV2Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
class DebertaV2Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
class DebertaV2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DebertaV2Attention(config)
self.intermediate = DebertaV2Intermediate(config)
self.output = DebertaV2Output(config)
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
attention_output = self.attention(
hidden_states,
attention_mask,
return_att=return_att,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if return_att:
attention_output, att_matrix = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if return_att:
return (layer_output, att_matrix)
else:
return layer_output
class ConvLayer(nn.Module):
def __init__(self, config):
super().__init__()
kernel_size = getattr(config, "conv_kernel_size", 3)
groups = getattr(config, "conv_groups", 1)
self.conv_act = getattr(config, "conv_act", "tanh")
self.conv = nn.Conv1d(
config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, residual_states, input_mask):
out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
rmask = (1 - input_mask).bool()
out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
out = ACT2FN[self.conv_act](self.dropout(out))
layer_norm_input = residual_states + out
output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
if input_mask is None:
output_states = output
else:
if input_mask.dim() != layer_norm_input.dim():
if input_mask.dim() == 4:
input_mask = input_mask.squeeze(1).squeeze(1)
input_mask = input_mask.unsqueeze(2)
input_mask = input_mask.to(output.dtype)
output_states = output * input_mask
return output_states
class DebertaV2Encoder(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.position_buckets = getattr(config, "position_buckets", -1)
pos_ebd_size = self.max_relative_positions * 2
if self.position_buckets > 0:
pos_ebd_size = self.position_buckets * 2
self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
if "layer_norm" in self.norm_rel_ebd:
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
rel_embeddings = self.LayerNorm(rel_embeddings)
return rel_embeddings
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
attention_mask = attention_mask.byte()
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
relative_pos = build_relative_position(
q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
)
return relative_pos
def forward(
self,
hidden_states,
attention_mask,
output_hidden_states=True,
output_attentions=False,
query_states=None,
relative_pos=None,
return_dict=True,
):
if attention_mask.dim() <= 2:
input_mask = attention_mask
else:
input_mask = (attention_mask.sum(-2) > 0).byte()
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[0]
else:
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
output_states = next_kv
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
output_states = layer_module(
next_kv,
attention_mask,
output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if output_attentions:
output_states, att_m = output_states
if i == 0 and self.conv is not None:
output_states = self.conv(hidden_states, output_states, input_mask)
if query_states is not None:
query_states = output_states
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
else:
next_kv = output_states
if output_attentions:
all_attentions = all_attentions + (att_m,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if not return_dict:
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
)
def make_log_bucket_position(relative_pos, bucket_size, max_position):
sign = np.sign(relative_pos)
mid = bucket_size // 2
abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos))
log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid
    bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(int)
return bucket_pos
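# Illustrative example of the bucketing above (example values, not taken from a config): with
# bucket_size=32 and max_position=512, mid=16, so relative distances with |d| <= mid keep their
# exact value, while longer distances are compressed logarithmically into bucket ids of
# magnitude up to bucket_size - 1.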
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
"""
Build relative position according to the query and key
    We assume the absolute position of query :math:`P_q` ranges from (0, query_size) and the absolute position of key
    :math:`P_k` ranges from (0, key_size). The relative position from query to key is :math:`R_{q \\rightarrow k} =
    P_q - P_k`
Args:
query_size (int): the length of query
key_size (int): the length of key
bucket_size (int): the size of position bucket
max_position (int): the maximum allowed absolute position
Return:
:obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
"""
q_ids = np.arange(0, query_size)
k_ids = np.arange(0, key_size)
rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1))
if bucket_size > 0 and max_position > 0:
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = rel_pos_ids.unsqueeze(0)
return rel_pos_ids
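# Worked example of the helper above: for query_size=3, key_size=4 and no bucketing,
# build_relative_position returns a [1, 3, 4] tensor
#   [[[ 0, -1, -2, -3],
#     [ 1,  0, -1, -2],
#     [ 2,  1,  0, -1]]]
# i.e. entry [0, q, k] equals q - k.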
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DisentangledSelfAttention(nn.Module):
"""
Disentangled self-attention module
Parameters:
config (:obj:`DebertaV2Config`):
A model config class instance with the configuration to build a new model. The schema is similar to
            `BertConfig`; for more details, please refer to :class:`~transformers.DebertaV2Config`
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
_attention_head_size = config.hidden_size // config.num_attention_heads
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.share_att_key = getattr(config, "share_att_key", False)
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.position_buckets = getattr(config, "position_buckets", -1)
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_ebd_size = self.max_relative_positions
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets
self.pos_dropout = StableDropout(config.hidden_dropout_prob)
if not self.share_att_key:
if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = StableDropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, attention_heads):
new_x_shape = x.size()[:-1] + (attention_heads, -1)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
"""
Call the module
Args:
hidden_states (:obj:`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer; it will be the Q, K and V in
`Attention(Q,K,V)`
attention_mask (:obj:`torch.ByteTensor`):
An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum
sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`
th token.
return_att (:obj:`bool`, optional):
Whether return the attention matrix.
query_states (:obj:`torch.FloatTensor`, optional):
The `Q` state in `Attention(Q,K,V)`.
relative_pos (:obj:`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with
values ranging in [`-max_relative_positions`, `max_relative_positions`].
rel_embeddings (:obj:`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [:math:`2 \\times
\\text{max_relative_positions}`, `hidden_size`].
"""
if query_states is None:
query_states = hidden_states
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
rel_att = None
# Take the dot product between "query" and "key" to get the raw attention scores.
scale_factor = 1
if "c2p" in self.pos_att_type:
scale_factor += 1
if "p2c" in self.pos_att_type:
scale_factor += 1
if "p2p" in self.pos_att_type:
scale_factor += 1
scale = math.sqrt(query_layer.size(-1) * scale_factor)
attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale
if self.relative_attention:
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_attention_bias(
query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
)
if rel_att is not None:
attention_scores = attention_scores + rel_att
attention_scores = attention_scores
attention_scores = attention_scores.view(
-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
)
# bsz x height x length x dimension
attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.bmm(
attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
)
context_layer = (
context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
.permute(0, 2, 1, 3)
.contiguous()
)
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
context_layer = context_layer.view(*new_context_layer_shape)
if return_att:
return (context_layer, attention_probs)
else:
return context_layer
def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
if relative_pos is None:
q = query_layer.size(-2)
relative_pos = build_relative_position(
q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
)
if relative_pos.dim() == 2:
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
elif relative_pos.dim() == 3:
relative_pos = relative_pos.unsqueeze(1)
# bsz x height x query x key
elif relative_pos.dim() != 4:
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
att_span = self.pos_ebd_size
relative_pos = relative_pos.long().to(query_layer.device)
rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :].unsqueeze(0)
if self.share_att_key:
pos_query_layer = self.transpose_for_scores(
self.query_proj(rel_embeddings), self.num_attention_heads
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
)
else:
if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
pos_key_layer = self.transpose_for_scores(
self.pos_key_proj(rel_embeddings), self.num_attention_heads
).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
) # .split(self.all_head_size, dim=-1)
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
pos_query_layer = self.transpose_for_scores(
self.pos_query_proj(rel_embeddings), self.num_attention_heads
).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
) # .split(self.all_head_size, dim=-1)
score = 0
# content->position
if "c2p" in self.pos_att_type:
scale = math.sqrt(pos_key_layer.size(-1) * scale_factor)
c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
c2p_att = torch.gather(
c2p_att,
dim=-1,
index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
)
score += c2p_att / scale
# position->content
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
scale = math.sqrt(pos_query_layer.size(-1) * scale_factor)
if key_layer.size(-2) != query_layer.size(-2):
r_pos = build_relative_position(
key_layer.size(-2),
key_layer.size(-2),
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
).to(query_layer.device)
r_pos = r_pos.unsqueeze(0)
else:
r_pos = relative_pos
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
if query_layer.size(-2) != key_layer.size(-2):
pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
if "p2c" in self.pos_att_type:
p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
p2c_att = torch.gather(
p2c_att,
dim=-1,
index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
).transpose(-1, -2)
if query_layer.size(-2) != key_layer.size(-2):
p2c_att = torch.gather(
p2c_att,
dim=-2,
index=pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))),
)
score += p2c_att / scale
# position->position
if "p2p" in self.pos_att_type:
pos_query = pos_query_layer[:, :, att_span:, :]
p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))
p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])
if query_layer.size(-2) != key_layer.size(-2):
p2p_att = torch.gather(
p2p_att,
dim=-2,
index=pos_index.expand(query_layer.size()[:2] + (pos_index.size(-2), p2p_att.size(-1))),
)
p2p_att = torch.gather(
p2p_att,
dim=-1,
index=c2p_pos.expand(
[query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]
),
)
score += p2p_att
return score
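# In the bias above, "c2p" (content->position) scores content queries against relative-position
# keys, "p2c" (position->content) scores relative-position queries against content keys, and the
# optional "p2p" term scores positions against positions; each term is gathered at the bucketed
# relative positions and added to the content-to-content attention scores.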
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
class DebertaV2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
pad_token_id = getattr(config, "pad_token_id", 0)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
self.position_biased_input = getattr(config, "position_biased_input", True)
if not self.position_biased_input:
self.position_embeddings = None
else:
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
if self.embedding_size != config.hidden_size:
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids.long())
else:
position_embeddings = torch.zeros_like(inputs_embeds)
embeddings = inputs_embeds
if self.position_biased_input:
embeddings += position_embeddings
if self.config.type_vocab_size > 0:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings += token_type_embeddings
if self.embedding_size != self.config.hidden_size:
embeddings = self.embed_proj(embeddings)
embeddings = self.LayerNorm(embeddings)
if mask is not None:
if mask.dim() != embeddings.dim():
if mask.dim() == 4:
mask = mask.squeeze(1).squeeze(1)
mask = mask.unsqueeze(2)
mask = mask.to(embeddings.dtype)
embeddings = embeddings * mask
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DebertaV2Config
base_model_prefix = "deberta"
_keys_to_ignore_on_load_missing = ["position_ids"]
_keys_to_ignore_on_load_unexpected = ["position_embeddings"]
def __init__(self, config):
super().__init__(config)
self._register_load_state_dict_pre_hook(self._pre_load_hook)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
"""
Removes the classifier if it doesn't have the correct number of labels.
"""
self_state = self.state_dict()
if (
("classifier.weight" in self_state)
and ("classifier.weight" in state_dict)
and self_state["classifier.weight"].size() != state_dict["classifier.weight"].size()
):
logger.warning(
f"The checkpoint classifier head has a shape {state_dict['classifier.weight'].size()} and this model "
f"classifier head has a shape {self_state['classifier.weight'].size()}. Ignoring the checkpoint "
f"weights. You should train your model on new data."
)
del state_dict["classifier.weight"]
if "classifier.bias" in state_dict:
del state_dict["classifier.bias"]
DEBERTA_START_DOCSTRING = r"""
The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
<https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built on top of
BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two
improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.DebertaV2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.DebertaV2Tokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaV2Embeddings(config)
self.encoder = DebertaV2Encoder(config)
self.z_steps = 0
self.config = config
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
mask=attention_mask,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
encoded_layers = encoder_outputs[1]
if self.z_steps > 1:
hidden_states = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(
hidden_states,
attention_mask,
return_att=False,
query_states=query_states,
relative_pos=rel_pos,
rel_embeddings=rel_embeddings,
)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top. """, DEBERTA_START_DOCSTRING)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.deberta = DebertaV2Model(config)
self.cls = DebertaV2OnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta
class DebertaV2PredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta
class DebertaV2LMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = DebertaV2PredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaV2OnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = DebertaV2LMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@add_start_docstrings(
"""
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
num_labels = getattr(config, "num_labels", 2)
self.num_labels = num_labels
self.deberta = DebertaV2Model(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, num_labels)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = StableDropout(drop_out)
self.init_weights()
def get_input_embeddings(self):
return self.deberta.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.deberta.set_input_embeddings(new_embeddings)
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# regression task
loss_fn = nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = loss_fn(logits, labels.view(-1))
elif labels.dim() == 1 or labels.size(-1) == 1:
label_index = (labels >= 0).nonzero()
labels = labels.long()
if label_index.size(0) > 0:
labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
labels = torch.gather(labels, 0, label_index.view(-1))
loss_fct = CrossEntropyLoss()
loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
else:
loss = torch.tensor(0).to(logits)
else:
log_softmax = nn.LogSoftmax(-1)
loss = -((log_softmax(logits) * labels).sum(-1)).mean()
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
else:
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaV2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
# pdb.set_trace()
# code change begin
loss_weights = torch.zeros(len(self.config.label2id)).to(self.device)
loss_weights[self.config.label2id["SOLUTION-CORRECT"]] = self.config.task_specific_params["solution_correct_loss_weight"]
loss_weights[self.config.label2id["SOLUTION-INCORRECT"]] = self.config.task_specific_params["solution_incorrect_loss_weight"]
loss_weights[self.config.label2id["STEP-CORRECT"]] = self.config.task_specific_params["step_correct_loss_weight"]
loss_weights[self.config.label2id["STEP-INCORRECT"]] = self.config.task_specific_params["step_incorrect_loss_weight"]
loss_weights[self.config.label2id["O"]] = self.config.task_specific_params["other_label_loss_weight"]
# code change end
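            # The class-weighted CrossEntropyLoss below lets each correctness label
            # (solution/step correct or incorrect, plus "O") contribute to the loss with the
            # weight configured in task_specific_params above.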
loss_fct = CrossEntropyLoss(weight=loss_weights)
# pdb.set_trace()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaV2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
CodeT/DIVERSE/code/src/deberta_model.py/0
|
{
"file_path": "CodeT/DIVERSE/code/src/deberta_model.py",
"repo_id": "CodeT",
"token_count": 28029
}
| 219 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: config.sample.py
Description: unittest configuration for Python SDK of the Cognitive Face API.
- Copy `config.sample.py` to `config.py`.
- Change the `BASE_URL` if necessary.
- Assign the `KEY` with a valid Subscription Key.
"""
# Subscription Key for calling the Cognitive Face API.
KEY = ''
# Base URL for calling the Cognitive Face API.
# default is 'https://westus.api.cognitive.microsoft.com/face/v1.0/'
BASE_URL = ''
# Time (in seconds) for sleep between each call to avoid exceeding quota.
# Defaults to 3 as free subscriptions have a limit of 20 calls per minute.
TIME_SLEEP = 3
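# A minimal sketch of how these settings are consumed elsewhere in this repository
# (see cognitive_face.Key / cognitive_face.BaseUrl, used e.g. in the sample's util.py):
#   import cognitive_face as CF
#   CF.Key.set(KEY)
#   CF.BaseUrl.set(BASE_URL)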
|
Cognitive-Face-Python/cognitive_face/tests/config.sample.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/tests/config.sample.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 201
}
| 220 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: util.py
Description: util module for Python SDK sample.
"""
from threading import Thread
import io
import operator
import os.path
from PIL import Image
import wx
try:
import cognitive_face as CF
except ImportError:
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import cognitive_face as CF
IMAGE_WILDCARD = 'Image files (*.jpg, *.png)|*.jpg; *.png'
INNER_PANEL_WIDTH = 710
MAX_IMAGE_SIZE = 300
MAX_THUMBNAIL_SIZE = 75
STYLE = wx.SIMPLE_BORDER
SUBSCRIPTION_KEY_FILENAME = 'Subscription.txt'
ENDPOINT_FILENAME = 'Endpoint.txt'
ORIENTATION_TAG = 274
LOG_FACE_LIST_REQUEST = (
    'Request: Face List {} will be used to build the person database. '
'Checking whether group exists.')
LOG_FACE_LIST_NOT_EXIST = 'Response: Face List {} does not exist before.'
LOG_FACE_LIST_EXIST = 'Response: Face List {} exists.'
LABEL_FACE = ('{}, {} years old\n'
'Hair: {}, Facial Hair: {}\n'
'Makeup: {}, Emotion: {}\n'
'Occluded: {}, Exposure: {}\n'
'{}\n{}\n')
class SubscriptionKey(object):
"""Subscription Key."""
@classmethod
def get(cls):
"""Get the subscription key."""
if not hasattr(cls, 'key'):
cls.key = ''
if not cls.key:
if os.path.isfile(SUBSCRIPTION_KEY_FILENAME):
with io.open(SUBSCRIPTION_KEY_FILENAME, encoding='utf-8') as fin:
cls.key = fin.read().strip()
else:
cls.key = ''
CF.Key.set(cls.key)
return cls.key
@classmethod
def set(cls, key):
"""Set the subscription key."""
cls.key = key
with io.open(SUBSCRIPTION_KEY_FILENAME, 'w', encoding='utf-8') as fout:
fout.write(key)
CF.Key.set(cls.key)
@classmethod
def delete(cls):
"""Delete the subscription key."""
cls.key = ''
if os.path.isfile(SUBSCRIPTION_KEY_FILENAME):
os.remove(SUBSCRIPTION_KEY_FILENAME)
CF.Key.set(cls.key)
class Endpoint(object):
"""Endpoint."""
@classmethod
def get(cls):
"""Get the endpoint."""
if not hasattr(cls, 'endpoint'):
cls.endpoint = ''
if not cls.endpoint:
if os.path.isfile(ENDPOINT_FILENAME):
with io.open(ENDPOINT_FILENAME, encoding='utf-8') as fin:
cls.endpoint = fin.read().strip()
else:
cls.endpoint = CF.BaseUrl.get()
CF.BaseUrl.set(cls.endpoint)
return cls.endpoint
@classmethod
def set(cls, endpoint):
"""Set the endpoint."""
cls.endpoint = endpoint
with io.open(ENDPOINT_FILENAME, 'w', encoding='utf-8') as fout:
fout.write(endpoint)
CF.BaseUrl.set(cls.endpoint)
@classmethod
def delete(cls):
"""Delete the endpoint."""
cls.endpoint = ''
if os.path.isfile(ENDPOINT_FILENAME):
os.remove(ENDPOINT_FILENAME)
CF.BaseUrl.set(CF.util.DEFAULT_BASE_URL)
def scale_image(img, size=MAX_IMAGE_SIZE):
"""Scale the wx.Image."""
width = img.GetWidth()
height = img.GetHeight()
    if width > height:
        new_width = size
        # Use integer division: wx.Image.Scale expects integer dimensions under Python 3.
        new_height = size * height // width
    else:
        new_height = size
        new_width = size * width // height
    img = img.Scale(new_width, new_height)
return img
def rotate_image(path):
"""Rotate the image from path and return wx.Image."""
img = Image.open(path)
try:
exif = img._getexif()
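        # EXIF orientation values 3, 6 and 8 require rotating the image by 180, 270 and 90 degrees respectively.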
if exif[ORIENTATION_TAG] == 3:
img = img.rotate(180, expand=True)
elif exif[ORIENTATION_TAG] == 6:
img = img.rotate(270, expand=True)
elif exif[ORIENTATION_TAG] == 8:
img = img.rotate(90, expand=True)
except:
pass
return pil_image_to_wx_image(img)
def draw_bitmap_rectangle(bitmap, faces):
"""Draw rectangle on bitmap."""
dc = wx.MemoryDC(bitmap.bmp)
dc.SetPen(wx.BLUE_PEN)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetTextBackground('black')
dc.SetTextForeground('white')
dc.SetBackgroundMode(wx.SOLID)
dc.SetFont(
wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD))
for face in faces:
dc.DrawRectangle(
face.rect.left * bitmap.scale, face.rect.top * bitmap.scale,
face.rect.width * bitmap.scale, face.rect.height * bitmap.scale)
if face.name:
text_width, text_height = dc.GetTextExtent(face.name)
dc.DrawText(face.name, face.rect.left * bitmap.scale,
face.rect.top * bitmap.scale - text_height)
dc.SelectObject(wx.NullBitmap)
bitmap.bitmap.SetBitmap(bitmap.bmp)
def pil_image_to_wx_image(pil_image):
"""Convert from PIL image to wx image."""
wx_image = wx.Image(pil_image.width, pil_image.height)
wx_image.SetData(pil_image.convert("RGB").tobytes())
return wx_image
def key_with_max_value(item):
"""Get the key with maximum value in a dict."""
return max(item.items(), key=operator.itemgetter(1))[0]
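# Note: 'async' became a reserved keyword in Python 3.7, so this decorator must be renamed (e.g. to 'run_async') for newer interpreters.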
def async(func):
"""Async wrapper."""
def wrapper(*args, **kwargs):
"""Async wrapper."""
thr = Thread(target=func, args=args, kwargs=kwargs)
thr.start()
return wrapper
|
Cognitive-Face-Python/sample/util.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/util.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 2584
}
| 221 |
export CUDA_VISIBLE_DEVICES=0
python t5_run_train.py \
--model_name_or_path ./checkpoint/Com/MainExp_pretrain_set1_seed1/checkpoint-100000 \
--subtask Com \
--method MainExp \
--train_file finetune \
--max_steps 50000 \
--save_steps 50000 \
--batch_size 8 \
--ebatch_size 16 \
--gas 1 \
--seed 1 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_finetune.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_finetune.sh",
"repo_id": "ContextualSP",
"token_count": 123
}
| 222 |
import subprocess
import argparse
import os
def run_command(bash_command):
process = subprocess.Popen(bash_command.split())
output, error = process.communicate()
print(error)
print(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, default="", help="model_name_or_path")
parser.add_argument("--output_dir", type=str, default="./checkpoint/", help="output dir")
parser.add_argument("--validation_file", type=str, default='test', help="validation file")
parser.add_argument("--ebatch_size", type=int, default=16, help="eval batch_size")
parser.add_argument("--device_num", type=int, default=1, help="device_num")
parser.add_argument("--seed", type=int, default=1, help="seed")
parser.add_argument("--checkpoint_step", type=int, default=None, help="checkpoint_step")
# parser.add_argument("--log_metrics", type=str, default='False', help="log_metrics")
# parser.add_argument("--log_label", type=str, default='False', help="log_label")
# parser.add_argument("--log_metrics_only", type=str, default='False', help="log_metrics_only")
parser.add_argument("--num_beams", type=int, default=5, help="num_beams")
parser.add_argument("--subtask", type=str, default='Com', help="subtask")
parser.add_argument("--set", type=str, default='set1', help="subtask")
# parser.add_argument("--with_constraint", type=str, default='True', help="with_constraint")
args = parser.parse_args()
print("START training")
run_command("printenv")
validation_file = '../../data/' + args.subtask + '/' + args.set + '/' + args.validation_file + '.json'
# .../data/Com/set1/test.json
cmd = f"""
python -m torch.distributed.launch --nproc_per_node {args.device_num} --master_port=12345 t5_eval_model.py \
--model_name_or_path {args.model_name_or_path} \
--output_dir {args.output_dir} \
--do_eval \
--validation_file {validation_file} \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size {args.ebatch_size} \
--overwrite_output_dir \
--gradient_accumulation_steps 1 \
--max_steps 100000 \
--logging_steps 10 \
--learning_rate 1e-5 \
--save_steps 100000 \
--eval_steps 100000 \
--evaluation_strategy steps \
--freeze_model_parameter \
--weight_decay 1e-2 \
--label_smoothing_factor 0.1 \
--lr_scheduler_type constant \
--fp16 False \
--predict_with_generate \
--dev_split -1 \
--num_beams {args.num_beams} \
--seed 1 \
--adafactor False \
--max_source_length 1024 \
--max_target_length 1024 \
--log_metrics True \
--log_label True \
--eval_type test \
--log_metrics_only False \
--with_constraint True
"""
print("RUN {}".format(cmd))
run_command(cmd)
|
ContextualSP/abstraction_probing/code/t5_code/t5_run_eval.py/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/t5_run_eval.py",
"repo_id": "ContextualSP",
"token_count": 1231
}
| 223 |
description: Adapter Differentiation for MT-NLU Job on AMLK8s
target:
service: amlk8s
# run "amlt target list amlk8s" to list the names of available AMLK8s targets
name: itpeusp100cl
vc: resrchvc
environment:
image: python:3.6
registry: docker.io # any public registry can be specified here
setup:
- pip install -r requirements.txt
code:
# local directory of the code. this will be uploaded to the server.
# $CONFIG_DIR is expanded to the directory of this config file
local_dir: $CONFIG_DIR
# data:
# data upload is not required for this example
# list of jobs to run, we run 1 jobs in this example
jobs:
- name: mtnlu-diff
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_diff_train.sh -tr mnli,cola,qnli,qqp,rte,sst,stsb,wnli -te mnli_matched,cola,qnli,qqp,rte,sst,stsb,wnli -ls 1000 -ss 2000 > /mnt/chenzhi/LOGS/mtnlu_diff_mid0_ite90.log
|
ContextualSP/adaptershare/adapter_diff_train.yaml/0
|
{
"file_path": "ContextualSP/adaptershare/adapter_diff_train.yaml",
"repo_id": "ContextualSP",
"token_count": 380
}
| 224 |
#!/usr/bin/env bash
##############################################################
# This script is used to download resources for MT-DNN experiments
##############################################################
BERT_DIR=$(pwd)/mt_dnn_models
if [ ! -d ${BERT_DIR} ]; then
echo "Create a folder BERT_DIR"
mkdir ${BERT_DIR}
fi
## Download MT-DNN models
wget https://mrc.blob.core.windows.net/mt-dnn-model/mt_dnn_base.pt -O "${BERT_DIR}/mt_dnn_base_uncased.pt"
wget https://mrc.blob.core.windows.net/mt-dnn-model/mt_dnn_large.pt -O "${BERT_DIR}/mt_dnn_large_uncased.pt"
## MT-DNN-KD
wget https://mrc.blob.core.windows.net/mt-dnn-model/mt_dnn_kd_large_cased.pt -O "${BERT_DIR}/mt_dnn_kd_large_cased.pt"
if [ "$1" == "model_only" ]; then
exit 1
fi
DATA_DIR=$(pwd)/data
if [ ! -d ${DATA_DIR} ]; then
echo "Create a folder $DATA_DIR"
mkdir ${DATA_DIR}
fi
## DOWNLOAD GLUE DATA
## Please refer to the glue-baseline install requirements if you hit issues.
git clone https://github.com/nyu-mll/jiant-v1-legacy.git
cd jiant-v1-legacy
python scripts/download_glue_data.py --data_dir $DATA_DIR --tasks all
cd ..
rm -rf jiant-v1-legacy
#########################
## DOWNLOAD SciTail
cd $DATA_DIR
wget http://data.allenai.org.s3.amazonaws.com/downloads/SciTailV1.1.zip
unzip SciTailV1.1.zip
mv SciTailV1.1 SciTail
# remove zip files
rm *.zip
## Download preprocessed SciTail/SNLI data for domain adaptation
cd $DATA_DIR
DOMAIN_ADP="domain_adaptation"
echo "Create a folder $DATA_DIR"
mkdir ${DOMAIN_ADP}
wget https://mrc.blob.core.windows.net/mt-dnn-model/data.zip
unzip data.zip
mv data/* ${DOMAIN_ADP}
rm -rf data.zip
rm -rf data
## Download SQuAD & SQuAD v2.0 data
cd $DATA_DIR
# Stay inside $DATA_DIR so the relative squad/, squad_v2/ and ner/ paths below resolve correctly.
mkdir "squad"
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O squad/train.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O squad/dev.json
mkdir "squad_v2"
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O squad_v2/train.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O squad_v2/dev.json
# NER
mkdir "ner"
wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.train -O "ner/train.txt"
wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testa -O "ner/valid.txt"
wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testb -O "ner/test.txt"
# SuperGLUE
wget https://dl.fbaipublicfiles.com/glue/superglue/data/v2/combined.zip -O superglue.zip
unzip superglue.zip
cd ..
|
ContextualSP/adaptershare/download.sh/0
|
{
"file_path": "ContextualSP/adaptershare/download.sh",
"repo_id": "ContextualSP",
"token_count": 1059
}
| 225 |
#!/usr/bin/env bash
###############################
# Training script for GLUE.
# It supports single and multi-task training
# By Xiaodong
###############################
set -e
if [[ $# -lt 6 ]]; then
echo "It requires 6 args to run the script and the current # of bash args: $#"
echo "run_glue_finetune.sh <data_dir> <model_type> <model_size> <task> <batch-size> <num_gpus>"
exit 1
fi
data_dir=$1
echo "Data dir: ${data_dir}"
model_type=$2
echo "Model type: ${model_type}"
model_size=$3
echo "Model size: ${model_size}"
# training set
task=$4
echo $task
batch_size=${5:-"16"}
num_gpus=${6:-"1"}
echo "GPU counts: ${num_gpus}"
export ROOT_DIR="glue_app"
export EPOCH=3
export LR="5e-5"
export OPTIM="adamax"
export TASK_DEF="experiments/glue/glue_task_def.yml"
export BS=${batch_size}
echo ${TASK_DEF}
train_dataset=${task}
test_dataset=${task}
# train task
if [ ${task} == "mnli" ]; then
test_dataset="mnli_matched,mnli_mismatched"
elif [ ${task} == "mtdnn" ]; then
train_dataset="mnli,rte,qqp,qnli,mrpc,sst,cola,stsb"
test_dataset="mnli_matched,mnli_mismatched,rte"
else
test_dataset=${task}
fi
echo "Training data: ${train_dataset}_train.json"
echo "Dev data: ${test_dataset}_dev.json"
if [ ${model_type} == "bert" ]; then
MD="bert-${model_size}-uncased"
DD="bert-${model_size}-uncased"
ED=1
elif [ ${model_type} == "roberta" ]; then
MD="roberta-${model_size}"
DD="roberta-${model_size}"
ED=2
elif [ ${model_type} == "deberta" ]; then
MD="microsoft/deberta-${model_size}"
DD="microsoft/deberta-${model_size}"
ED=6
elif [ ${model_type} == "t5e" ]; then
MD="t5-${model_size}"
DD="t5-${model_size}"
ED=8
elif [ ${model_type} == "electra" ]; then
MD="google/electra-${model_size}-discriminator"
DD="bert-base-uncased"
ED=7
elif [ ${model_type} == "mtdnn" ]; then
MD="mt_dnn_modes/mt_dnn_${model_size}_uncased.pt"
DD="bert-${model_size}-uncased"
ED=1
else
echo "Unknown model ${model_type}"
exit 1
fi
output_dir="${ROOT_DIR}/${task}/${DD}"
echo $output_dir
mkdir -p ${output_dir}
if [[ -f "${output_dir}/model*.pt" ]]; then
rm "${output_dir}/model*.pt"
rm "${output_dir}/config.json"
fi
echo "Training ${task} tokenized by ${DD} with ${MD}"
LOG_FILE="${output_dir}/mt-dnn-train.log"
#
if [ ${num_gpus} -ge 2 ]; then
# multi gpu training
# DDP config
export MASTER_ADDR=localhost
export MASTER_PORT="8787"
export NNODES=1
export NODE_RANK=0
export GPUS_PER_NODE=${num_gpus}
export WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
export DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
python -m torch.distributed.launch $DISTRIBUTED_ARGS train.py --data_dir=${data_dir}/${DD} --task_def=${TASK_DEF} --train_dataset=${train_dataset} --test_dataset=${test_dataset} --init_checkpoint=${MD} --batch_size=${BS} --learning_rate=${LR} --epochs=${EPOCH} --encoder_type=${ED} --optimizer=${OPTIM} --output_dir=${output_dir} --log_file=${LOG_FILE}
else
python train.py --data_dir=${data_dir}/${DD} --task_def=${TASK_DEF} --train_dataset=${train_dataset} --test_dataset=${test_dataset} --init_checkpoint=${MD} --batch_size=${BS} --learning_rate=${LR} --epochs=${EPOCH} --encoder_type=${ED} --optimizer=${OPTIM} --output_dir=${output_dir} --log_file=${LOG_FILE}
fi
|
ContextualSP/adaptershare/experiments/glue/run_glue_finetuning.sh/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/glue/run_glue_finetuning.sh",
"repo_id": "ContextualSP",
"token_count": 1396
}
| 226 |
import json
from sklearn.metrics import accuracy_score
import argparse
def compute_acc(predicts, labels):
return 100.0 * accuracy_score(labels, predicts)
def load(path):
with open(path, "r") as f:
return json.load(f)
def compute(scores, labels):
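    # Report overall accuracy plus a per-language breakdown using the uid -> language mapping.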
lang_map = labels["lang_map"]
label_map = labels["label_map"]
uids = scores["uids"]
predictions = scores["predictions"]
grounds = []
machines = []
predictions_map = {}
    for uid, pred in enumerate(predictions):
        uid = str(uid)
        machines.append(pred)
        grounds.append(label_map[uid])
        predictions_map[uid] = pred
metrics = {"all": compute_acc(machines, grounds)}
print("total size: {}".format(len(machines)))
for lan, subuids in lang_map.items():
sub_machine = [predictions_map[i] for i in subuids]
sub_ground = [label_map[i] for i in subuids]
metrics[lan] = compute_acc(sub_machine, sub_ground)
print("size of {}: {}".format(lan, len(sub_machine)))
print(metrics)
parser = argparse.ArgumentParser()
parser.add_argument("--fscore", type=str, required=True)
parser.add_argument("--fcat", type=str, required=True)
args = parser.parse_args()
# score_path = "models/xnli_dev_scores_0.json"
# label_path = "data/XNLI/xnli_dev_cat.json"
score_path = args.fscore
label_path = args.fcat
scores = load(score_path)
labels = load(label_path)
compute(scores, labels)
|
ContextualSP/adaptershare/experiments/xnli/xnli_eval.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/xnli/xnli_eval.py",
"repo_id": "ContextualSP",
"token_count": 567
}
| 227 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import copy
import imp
import sys, os
import torch
import tasks
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import *
from data_utils.utils import AverageMeter
from mt_dnn.loss import LOSS_REGISTRY
from mt_dnn.matcher import SANBertNetwork
from mt_dnn.perturbation import SmartPerturbation
from mt_dnn.loss import *
from mt_dnn.optim import AdamaxW
from data_utils.task_def import TaskType, EncoderModelType
from experiments.exp_def import TaskDef
from data_utils.my_statics import DUMPY_STRING_FOR_EMPTY_ANS
from transformers.modeling_utils import unwrap_model
from transformers import PreTrainedModel
logger = logging.getLogger(__name__)
class MTDNNModel(object):
def __init__(self, opt, device=None, state_dict=None, num_train_step=-1, adapter=False, adapter_args=None, task_name='adapter'):
self.config = opt
self.updates = (
state_dict["updates"] if state_dict and "updates" in state_dict else 0
)
self.adapter = adapter
self.local_updates = 0
self.device = device
self.train_loss = AverageMeter()
self.adv_loss = AverageMeter()
self.emb_val = AverageMeter()
self.eff_perturb = AverageMeter()
self.initial_from_local = True if state_dict else False
model = SANBertNetwork(opt, initial_from_local=self.initial_from_local, adapter_args=adapter_args, adapter=adapter, task_name=task_name)
self.total_param = sum(
[p.nelement() for p in model.parameters() if p.requires_grad]
)
if opt["cuda"]:
if self.config["local_rank"] != -1:
model = model.to(self.device)
else:
model = model.to(self.device)
self.network = model
if state_dict:
missing_keys, unexpected_keys = self.network.load_state_dict(
state_dict["state"], strict=False
)
optimizer_parameters = self._get_param_groups()
self._setup_optim(optimizer_parameters, state_dict, num_train_step)
self.optimizer.zero_grad()
# if self.config["local_rank"] not in [-1, 0]:
# torch.distributed.barrier()
if self.config["local_rank"] != -1:
self.mnetwork = torch.nn.parallel.DistributedDataParallel(
self.network,
device_ids=[self.config["local_rank"]],
output_device=self.config["local_rank"],
find_unused_parameters=True,
)
elif self.config["multi_gpu_on"]:
self.mnetwork = nn.DataParallel(self.network)
else:
self.mnetwork = self.network
self._setup_lossmap(self.config)
self._setup_kd_lossmap(self.config)
self._setup_adv_lossmap(self.config)
self._setup_adv_training(self.config)
self._setup_tokenizer()
def _setup_adv_training(self, config):
self.adv_teacher = None
if config.get("adv_train", False):
self.adv_teacher = SmartPerturbation(
config["adv_epsilon"],
config["multi_gpu_on"],
config["adv_step_size"],
config["adv_noise_var"],
config["adv_p_norm"],
config["adv_k"],
config["fp16"],
config["encoder_type"],
loss_map=self.adv_task_loss_criterion,
norm_level=config["adv_norm_level"],
)
def _get_param_groups(self):
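        # Split parameters into two groups: biases and LayerNorm parameters are excluded from weight decay.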
no_decay = ["bias", "gamma", "beta", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p
for n, p in self.network.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in self.network.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
return optimizer_parameters
def _setup_optim(self, optimizer_parameters, state_dict=None, num_train_step=-1):
if self.config['optimizer'] == 'sgd':
self.optimizer = optim.SGD(optimizer_parameters, self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adamax':
self.optimizer = AdamaxW(optimizer_parameters,
lr=self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
elif self.config['optimizer'] == 'adam':
self.optimizer = optim.AdamW(optimizer_parameters,
lr=self.config['learning_rate'],
weight_decay=self.config['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])
        if state_dict and "optimizer" in state_dict:
            self.optimizer.load_state_dict(state_dict["optimizer"])
# if self.config["fp16"]:
# try:
# from apex import amp
# global amp
# except ImportError:
# raise ImportError(
# "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
# )
# model, optimizer = amp.initialize(
# self.network, self.optimizer, opt_level=self.config["fp16_opt_level"]
# )
# self.network = model
# self.optimizer = optimizer
# # set up scheduler
self.scheduler = None
scheduler_type = self.config['scheduler_type']
warmup_steps = self.config['warmup'] * num_train_step
if scheduler_type == 3:
from transformers import get_polynomial_decay_schedule_with_warmup
self.scheduler = get_polynomial_decay_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
        elif scheduler_type == 2:
from transformers import get_constant_schedule_with_warmup
self.scheduler = get_constant_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps
)
elif scheduler_type == 1:
from transformers import get_cosine_schedule_with_warmup
self.scheduler = get_cosine_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
else:
from transformers import get_linear_schedule_with_warmup
self.scheduler = get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_train_step
)
def _setup_lossmap(self, config):
task_def_list = config["task_def_list"]
self.task_loss_criterion = []
for idx, task_def in enumerate(task_def_list):
cs = task_def.loss
lc = LOSS_REGISTRY[cs](name="Loss func of task {}: {}".format(idx, cs))
self.task_loss_criterion.append(lc)
def _setup_kd_lossmap(self, config):
task_def_list = config["task_def_list"]
self.kd_task_loss_criterion = []
if config.get("mkd_opt", 0) > 0:
for idx, task_def in enumerate(task_def_list):
cs = task_def.kd_loss
assert cs is not None
lc = LOSS_REGISTRY[cs](
name="KD Loss func of task {}: {}".format(idx, cs)
)
self.kd_task_loss_criterion.append(lc)
def _setup_adv_lossmap(self, config):
task_def_list = config["task_def_list"]
self.adv_task_loss_criterion = []
if config.get("adv_train", False):
for idx, task_def in enumerate(task_def_list):
cs = task_def.adv_loss
assert cs is not None
lc = LOSS_REGISTRY[cs](
name="Adv Loss func of task {}: {}".format(idx, cs)
)
self.adv_task_loss_criterion.append(lc)
def _setup_tokenizer(self):
try:
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.config["init_checkpoint"],
cache_dir=self.config["transformer_cache"],
)
except:
self.tokenizer = None
def _to_cuda(self, tensor):
if tensor is None:
return tensor
if isinstance(tensor, list) or isinstance(tensor, tuple):
# y = [e.cuda(non_blocking=True) for e in tensor]
y = [e.to(self.device) for e in tensor]
for e in y:
e.requires_grad = False
else:
# y = tensor.cuda(non_blocking=True)
y = tensor.to(self.device)
y.requires_grad = False
return y
def compute_loss(self, batch_meta, batch_data):
self.network.train()
y = batch_data[batch_meta["label"]]
y = self._to_cuda(y) if self.config["cuda"] else y
if batch_meta["task_def"]["task_type"] == TaskType.SeqenceGeneration:
seq_length = y.size(1)
y = y.view(-1)
task_id = batch_meta["task_id"]
inputs = batch_data[: batch_meta["input_len"]]
if len(inputs) == 3:
inputs.append(None)
inputs.append(None)
inputs.append(task_id)
if "y_token_id" in batch_meta:
inputs.append(batch_data[batch_meta["y_token_id"]])
weight = None
if self.config.get("weighted_on", False):
if self.config["cuda"]:
weight = batch_data[batch_meta["factor"]].cuda(non_blocking=True)
else:
weight = batch_data[batch_meta["factor"]]
# fw to get logits
logits = self.mnetwork(*inputs)
# compute loss
loss = 0
if self.task_loss_criterion[task_id] and (y is not None):
loss_criterion = self.task_loss_criterion[task_id]
if (
isinstance(loss_criterion, RankCeCriterion)
and batch_meta["pairwise_size"] > 1
):
# reshape the logits for ranking.
loss = self.task_loss_criterion[task_id](
logits,
y,
weight,
ignore_index=-1,
pairwise_size=batch_meta["pairwise_size"],
)
elif batch_meta["task_def"]["task_type"] == TaskType.SeqenceGeneration:
weight = (
(
1.0
/ torch.sum(
(y > -1).float().view(-1, seq_length), 1, keepdim=True
)
)
.repeat(1, seq_length)
.view(-1)
)
loss = self.task_loss_criterion[task_id](
logits, y, weight, ignore_index=-1
)
else:
loss = self.task_loss_criterion[task_id](
logits, y, weight, ignore_index=-1
)
# compute kd loss
if self.config.get("mkd_opt", 0) > 0 and ("soft_label" in batch_meta):
soft_labels = batch_meta["soft_label"]
soft_labels = (
self._to_cuda(soft_labels) if self.config["cuda"] else soft_labels
)
kd_lc = self.kd_task_loss_criterion[task_id]
kd_loss = (
kd_lc(logits, soft_labels, weight, ignore_index=-1) if kd_lc else 0
)
loss = loss + kd_loss
# adv training
if self.config.get("adv_train", False) and self.adv_teacher:
# task info
task_type = batch_meta["task_def"]["task_type"]
adv_inputs = (
[self.mnetwork, logits]
+ inputs
+ [task_type, batch_meta.get("pairwise_size", 1)]
)
adv_loss, emb_val, eff_perturb = self.adv_teacher.forward(*adv_inputs)
loss = loss + self.config["adv_alpha"] * adv_loss
batch_size = batch_data[batch_meta["token_id"]].size(0)
# rescale loss as dynamic batching
if self.config["bin_on"]:
loss = loss * (1.0 * batch_size / self.config["batch_size"])
if self.config["local_rank"] != -1:
# print('Rank ', self.config['local_rank'], ' loss ', loss)
copied_loss = copy.deepcopy(loss.data)
torch.distributed.all_reduce(copied_loss)
copied_loss = copied_loss / self.config["world_size"]
self.train_loss.update(copied_loss.item(), batch_size)
else:
self.train_loss.update(loss.item(), batch_size)
if self.config.get("adv_train", False) and self.adv_teacher:
if self.config["local_rank"] != -1:
copied_adv_loss = copy.deepcopy(adv_loss.data)
torch.distributed.all_reduce(copied_adv_loss)
copied_adv_loss = copied_adv_loss / self.config["world_size"]
self.adv_loss.update(copied_adv_loss.item(), batch_size)
copied_emb_val = copy.deepcopy(emb_val.data)
torch.distributed.all_reduce(copied_emb_val)
copied_emb_val = copied_emb_val / self.config["world_size"]
self.emb_val.update(copied_emb_val.item(), batch_size)
copied_eff_perturb = copy.deepcopy(eff_perturb.data)
torch.distributed.all_reduce(copied_eff_perturb)
copied_eff_perturb = copied_eff_perturb / self.config["world_size"]
self.eff_perturb.update(copied_eff_perturb.item(), batch_size)
else:
self.adv_loss.update(adv_loss.item(), batch_size)
self.emb_val.update(emb_val.item(), batch_size)
self.eff_perturb.update(eff_perturb.item(), batch_size)
# scale loss
loss = loss / self.config.get("grad_accumulation_step", 1)
return loss
def update(self, batch_meta, batch_data):
loss = self.compute_loss(batch_meta, batch_data)
# if self.config["fp16"]:
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
# else:
loss.backward()
self.local_updates += 1
if self.local_updates % self.config.get("grad_accumulation_step", 1) == 0:
if self.config["global_grad_clipping"] > 0:
# if self.config["fp16"]:
# torch.nn.utils.clip_grad_norm_(
# amp.master_params(self.optimizer),
# self.config["global_grad_clipping"],
# )
# else:
torch.nn.utils.clip_grad_norm_(
self.network.parameters(), self.config["global_grad_clipping"]
)
self.updates += 1
# reset number of the grad accumulation
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler:
self.scheduler.step()
def encode(self, batch_meta, batch_data):
self.network.eval()
inputs = batch_data[:3]
sequence_output = self.network.encode(*inputs)[0]
return sequence_output
# TODO: similar as function extract, preserve since it is used by extractor.py
# will remove after migrating to transformers package
def extract(self, batch_meta, batch_data):
self.network.eval()
# 'token_id': 0; 'segment_id': 1; 'mask': 2
inputs = batch_data[:3]
all_encoder_layers, pooled_output = self.mnetwork.bert(*inputs)
return all_encoder_layers, pooled_output
def predict(self, batch_meta, batch_data):
self.network.eval()
task_id = batch_meta["task_id"]
task_def = TaskDef.from_dict(batch_meta["task_def"])
task_type = task_def.task_type
task_obj = tasks.get_task_obj(task_def)
inputs = batch_data[: batch_meta["input_len"]]
if len(inputs) == 3:
inputs.append(None)
inputs.append(None)
inputs.append(task_id)
if task_type == TaskType.SeqenceGeneration:
# y_idx, #3 -> gen
inputs.append(None)
inputs.append(3)
score = self.mnetwork(*inputs)
if task_obj is not None:
score, predict = task_obj.test_predict(score)
elif task_type == TaskType.Ranking:
score = score.contiguous().view(-1, batch_meta["pairwise_size"])
assert task_type == TaskType.Ranking
score = F.softmax(score, dim=1)
score = score.data.cpu()
score = score.numpy()
predict = np.zeros(score.shape, dtype=int)
positive = np.argmax(score, axis=1)
for idx, pos in enumerate(positive):
predict[idx, pos] = 1
predict = predict.reshape(-1).tolist()
score = score.reshape(-1).tolist()
return score, predict, batch_meta["true_label"]
elif task_type == TaskType.SeqenceLabeling:
mask = batch_data[batch_meta["mask"]]
score = score.contiguous()
score = score.data.cpu()
score = score.numpy()
predict = np.argmax(score, axis=1).reshape(mask.size()).tolist()
            valid_lengths = mask.sum(1).tolist()
            final_predict = []
            for idx, p in enumerate(predict):
                final_predict.append(p[: valid_lengths[idx]])
score = score.reshape(-1).tolist()
return score, final_predict, batch_meta["label"]
elif task_type == TaskType.Span or task_type == TaskType.SpanYN:
predictions = []
features = []
for idx, offset in enumerate(batch_meta["offset_mapping"]):
token_is_max_context = (
batch_meta["token_is_max_context"][idx]
if batch_meta.get("token_is_max_context", None)
else None
)
sample_id = batch_meta["uids"][idx]
if "label" in batch_meta:
feature = {
"offset_mapping": offset,
"token_is_max_context": token_is_max_context,
"uid": sample_id,
"context": batch_meta["context"][idx],
"answer": batch_meta["answer"][idx],
"label": batch_meta["label"][idx],
}
else:
feature = {
"offset_mapping": offset,
"token_is_max_context": token_is_max_context,
"uid": sample_id,
"context": batch_meta["context"][idx],
"answer": batch_meta["answer"][idx],
}
if "null_ans_index" in batch_meta:
feature["null_ans_index"] = batch_meta["null_ans_index"]
features.append(feature)
start, end = score
start = start.contiguous()
start = start.data.cpu()
start = start.numpy().tolist()
end = end.contiguous()
end = end.data.cpu()
end = end.numpy().tolist()
return (start, end), predictions, features
elif task_type == TaskType.SeqenceGeneration:
predicts = self.tokenizer.batch_decode(score, skip_special_tokens=True)
predictions = {}
golds = {}
for idx, predict in enumerate(predicts):
sample_id = batch_meta["uids"][idx]
answer = batch_meta["answer"][idx]
predict = predict.strip()
if predict == DUMPY_STRING_FOR_EMPTY_ANS:
predict = ""
predictions[sample_id] = predict
golds[sample_id] = answer
score = score.contiguous()
score = score.data.cpu()
score = score.numpy().tolist()
return score, predictions, golds
elif task_type == TaskType.ClozeChoice:
score = score.contiguous().view(-1)
score = score.data.cpu()
score = score.numpy()
copy_score = score.tolist()
answers = batch_meta["answer"]
choices = batch_meta["choice"]
chunks = batch_meta["pairwise_size"]
uids = batch_meta["uids"]
predictions = {}
golds = {}
for chunk in chunks:
uid = uids[0]
answer = eval(answers[0])
choice = eval(choices[0])
answers = answers[chunk:]
choices = choices[chunk:]
current_p = score[:chunk]
score = score[chunk:]
positive = np.argmax(current_p)
predict = choice[positive]
predictions[uid] = predict
golds[uid] = answer
return copy_score, predictions, golds
else:
raise ValueError("Unknown task_type: %s" % task_type)
return score, predict, batch_meta["label"]
def save(self, filename):
if isinstance(self.mnetwork, torch.nn.parallel.DistributedDataParallel):
model = self.mnetwork.module
else:
model = self.network
if not self.adapter:
# network_state = dict([(k, v.cpu()) for k, v in self.network.state_dict().items()])
network_state = dict([(k, v.cpu()) for k, v in model.state_dict().items()])
params = {
"state": network_state,
"optimizer": self.optimizer.state_dict(),
"config": self.config,
}
torch.save(params, filename)
logger.info("model saved to {}".format(filename))
else:
model.bert.save_all_adapters('/'.join(filename.split('/')[:-1]))
network_state = dict([(k, v.cpu()) for k, v in model.state_dict().items() if 'bert' not in k])
params = {
"state": network_state,
"optimizer": self.optimizer.state_dict(),
"config": self.config,
}
torch.save(params, filename)
logger.info("model saved to {}".format(filename))
def load(self, checkpoint):
model_state_dict = torch.load(checkpoint)
if "state" in model_state_dict:
self.network.load_state_dict(model_state_dict["state"], strict=False)
if "optimizer" in model_state_dict:
self.optimizer.load_state_dict(model_state_dict["optimizer"])
if "config" in model_state_dict:
self.config.update(model_state_dict["config"])
if isinstance(self.mnetwork, torch.nn.parallel.DistributedDataParallel):
model = self.mnetwork.module
else:
model = self.network
if self.adapter:
self._load_adapters(model.bert, '/'.join(checkpoint.split('/')[:-1]))
def cuda(self):
self.network.cuda()
def _load_adapters(self, model, resume_from_checkpoint):
adapter_loaded = False
for file_name in os.listdir(resume_from_checkpoint):
if os.path.isdir(os.path.join(resume_from_checkpoint, file_name)):
if "," not in file_name and "adapter_config.json" in os.listdir(
os.path.join(resume_from_checkpoint, file_name)
):
model.load_adapter(os.path.join(os.path.join(resume_from_checkpoint, file_name)))
adapter_loaded = True
return adapter_loaded
|
ContextualSP/adaptershare/mt_dnn/model.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/model.py",
"repo_id": "ContextualSP",
"token_count": 12656
}
| 228 |
from utils.data_types import SQLTokenType
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel
from collections import defaultdict
from typing import Dict, List
from utils.data_iter import MetaIndex
from models.nn_utils import *
class WTQAlignmentModel(nn.Module):
def __init__(self, bert_version: str, dropout_prob: float) -> None:
super().__init__()
self.bert = BertModel.from_pretrained(bert_version)
self.hidden_size = get_bert_hidden_size(bert_version)
self.linear_out_col = nn.Linear(self.hidden_size, 2)
self.align_pointer = AttentivePointer(self.hidden_size)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, **inputs) -> Dict:
bert_outputs = self.bert(
inputs['input_token_ids'],
token_type_ids=inputs['input_token_types'],
attention_mask=inputs['input_token_ids'].ne(0))["last_hidden_state"]
bert_outputs = self.dropout(bert_outputs)
batched_col_logits = []
batched_align_weights = []
batched_question_outputs, batched_entity_outputs = [], []
for batch_idx in range(len(bert_outputs)):
meta_index: MetaIndex = inputs['meta_index'][batch_idx]
question_outputs = bert_outputs[batch_idx][1: meta_index.question_sep_index + 1]
batched_question_outputs += [question_outputs[:-1]]
col_outputs = bert_outputs[batch_idx][meta_index.col_encode_indices]
col_logits = self.linear_out_col(col_outputs)
batched_col_logits += [col_logits]
entity_outputs = col_outputs
_, alignment_weights = self.align_pointer.forward(
entity_outputs.unsqueeze(0),
question_outputs.unsqueeze(0),
question_outputs.unsqueeze(0))
batched_align_weights += [alignment_weights.squeeze(0)[:, :-1]]
batched_entity_outputs.append({SQLTokenType.column: col_outputs})
outputs = {
'column_logits': batched_col_logits,
'alignment_weights': batched_align_weights,
'question_outputs': batched_question_outputs,
'entity_outputs': batched_entity_outputs,
}
return outputs
def compute_loss(self, **inputs):
outputs = self.forward(**inputs)
column_logits = outputs['column_logits']
total_loss = 0
identify_loss = self._calculate_identification_loss(column_logits, **inputs)
total_loss += identify_loss
outputs['identify_loss'] = identify_loss
alignment_loss_weight = inputs['align_loss_weight'] if 'align_loss_weight' in inputs else 0.0
if alignment_loss_weight > 1e-3:
align_loss = self._calculate_alignment_loss(column_logits, outputs['alignment_weights'], **inputs)
total_loss += align_loss * alignment_loss_weight
outputs['align_loss'] = align_loss
outputs['loss'] = total_loss
return outputs
def _calculate_identification_loss(self, col_logits, **inputs):
col_labels = inputs['column_labels']
assert len(col_labels) == len(col_logits)
total_loss = 0
criterion = LabelSmoothingLoss(0.05) if inputs['label_smoothing'] else nn.CrossEntropyLoss()
for batch_idx in range(len(col_labels)):
total_loss += criterion(col_logits[batch_idx], col_labels[batch_idx])
return total_loss / len(col_labels)
def _calculate_alignment_loss(self, col_logits, align_weights, **inputs):
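        # Supervise the alignment weights with erasure-based rewards: mask each question n-gram,
        # re-score the columns, and use the score drop as a per-token weight in the BCE loss below.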
assert len(col_logits) == len(align_weights)
total_alignment_loss = 0
for batch_idx in range(len(col_logits)):
meta_index: MetaIndex = inputs['meta_index'][batch_idx]
# question_length = meta_index.num_question_tokens
question_length = meta_index.question_sep_index - 1
col_labels = inputs['column_labels'][batch_idx]
with torch.no_grad():
masking_inputs = self._generate_masking_inputs(
input_token_ids=inputs['input_token_ids'][batch_idx].detach(),
input_token_types=inputs['input_token_types'][batch_idx].detach(),
meta_index=meta_index,
example=inputs['example'][batch_idx])
masking_scores = self._run_masking_outputs(
input_token_ids=masking_inputs['input_token_ids'],
input_token_types=masking_inputs['input_token_types'],
meta_index=meta_index)
masking_rewards = self._calculate_masking_rewards(
labels={'col': col_labels},
base_scores={'col': F.softmax(col_logits[batch_idx], dim=-1)},
masking_scores=masking_scores,
masking_spans=masking_inputs['masking_spans'],
meta_index=meta_index)
total_alignment_loss += F.binary_cross_entropy(align_weights[batch_idx],
col_labels.to(torch.float).repeat_interleave(
question_length).view(-1, question_length),
weight=masking_rewards['col'])
return total_alignment_loss / len(col_logits)
@staticmethod
def _generate_masking_inputs(input_token_ids: torch.Tensor, input_token_types: torch.Tensor, meta_index: MetaIndex,
example: Dict):
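        # Build one masked copy of the input per candidate n-gram, replacing its word pieces with the [UNK] id (100).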
all_masking_input_token_ids, all_masking_spans = [], []
for i, j, _ in example['masking_ngrams']:
p_start, p_end = meta_index.question_spans[i][1], meta_index.question_spans[j][2]
masking_input_token_ids = input_token_ids.clone()
masking_input_token_ids[p_start:p_end + 1] = 100 # unk
all_masking_input_token_ids += [masking_input_token_ids]
all_masking_spans += [(i, j)]
return {
'input_token_ids': torch.stack(all_masking_input_token_ids, dim=0),
'input_token_types': torch.stack([input_token_types for _ in all_masking_spans]),
'meta_index': [meta_index for _ in all_masking_spans],
'masking_spans': all_masking_spans
}
def _run_masking_outputs(self, input_token_ids: torch.Tensor, input_token_types: torch.Tensor,
meta_index: MetaIndex):
bert_outputs = \
self.bert(input_token_ids, attention_mask=input_token_ids.ne(0), token_type_ids=input_token_types)[
"last_hidden_state"]
col_outputs = bert_outputs[:, meta_index.col_encode_indices]
col_scores = F.softmax(self.linear_out_col(col_outputs), dim=-1)
return {'col': col_scores}
def _calculate_masking_rewards(self,
labels: Dict[str, torch.LongTensor],
base_scores: Dict[str, torch.Tensor],
masking_scores: Dict[str, torch.Tensor],
masking_spans: List[Tuple[int, int]],
meta_index: MetaIndex,
default_weight: float = 0.1,
):
num_question_subword = meta_index.question_sep_index - 1
masking_rewards = {}
for e_type in ['col']:
e_labels, e_base_scores, e_masking_scores = labels[e_type], base_scores[e_type], masking_scores[e_type]
reward = torch.zeros((len(e_labels), num_question_subword), device=e_labels.device)
for idx in range(len(e_labels)):
label = e_labels[idx].item()
if label == 0:
reward[idx] = default_weight
continue
ngram_rewards = defaultdict(list)
for m_i, (start, end) in enumerate(masking_spans):
score_diff = (e_base_scores[idx, label] - e_masking_scores[m_i, idx, label]).clamp(0, 1).item()
for j in range(start, end + 1):
ngram_rewards[j].append(score_diff)
for q_idx in range(num_question_subword):
reward[idx, q_idx] = sum(ngram_rewards[q_idx]) / len(
ngram_rewards[q_idx]) if q_idx in ngram_rewards else 0.0
masking_rewards[e_type] = reward
return masking_rewards
|
ContextualSP/awakening_latent_grounding/models/wtq_align.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/wtq_align.py",
"repo_id": "ContextualSP",
"token_count": 4270
}
| 229 |
import os
from multiprocessing import Pool
import recognizers_suite as Recognizers
from Levenshtein import ratio
from recognizers_suite import Culture
from utils.data_types import *
def is_float(value: str) -> bool:
try:
float(value)
return True
except:
return False
def is_adjective(value: str) -> bool:
return value in ['old', 'older', 'oldest', 'young', 'youngest', 'younger', 'heavy', 'heavier', 'heaviest']
def permutate_ngrams(tokens: List[str], sep: str = ' ') -> List[Tuple[int, int, str]]:
ngrams = []
for i in range(len(tokens)):
for j in range(i, len(tokens)):
ngram = sep.join(tokens[i:j + 1]).lower()
ngrams.append((i, j, ngram))
return ngrams
class Vocab:
id2tokens: Dict[int, str]
token2ids: Dict[str, int]
def __init__(self, tokens: List[str], special_tokens: List[str]) -> None:
token2ids = {}
for token in special_tokens:
assert token not in token2ids
token2ids[token] = len(token2ids)
for token in tokens:
assert token not in token2ids
token2ids[token] = len(token2ids)
self.token2ids = token2ids
self.id2tokens = {idx: token for token, idx in token2ids.items()}
def __len__(self) -> int:
return len(self.token2ids)
def lookup_id(self, token: str, default_token: str = UNK_Token) -> int:
if token in self.token2ids:
return self.token2ids[token]
if default_token in self.token2ids:
return self.token2ids[default_token]
raise ValueError("Token {} not found in vocab".format(token))
def lookup_token(self, idx: int) -> str:
return self.id2tokens[idx]
@classmethod
def from_file(cls, path: str, special_tokens: List[str] = [SOS_Token, EOS_Token], min_freq: int = 5):
tokens = []
assert os.path.exists(path), '{} not found'.format(path)
with open(path, 'r', encoding='utf-8') as fr:
for line in fr:
items = line.split('\t')
if len(items) != 2:
raise ValueError()
token = items[0]
freq = int(items[1])
if freq >= min_freq:
tokens.append(token)
return Vocab(tokens, special_tokens)
class NGramMatcher(object):
def __init__(self, ngram_tokens: Tuple[str, List[str]], sep: str = ' ') -> None:
self.sep = sep
self.ngrams_dict = self._initialize_ngrams(ngram_tokens)
def _initialize_ngrams(self, ngram_tokens: Tuple[str, List[str]]) -> Dict[str, List[Tuple[str, int, int]]]:
ngrams_dict = defaultdict(list)
for key, tokens in ngram_tokens:
for i, j, ngram in permutate_ngrams(tokens):
ngrams_dict[ngram].append((key, i, j))
return ngrams_dict
def match(self, query_tokens: List[str]):
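        # Collect every n-gram hit, then keep only hits that are not contained in a longer, already-kept match for the same key.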
all_matches = []
for i, j, ngram in permutate_ngrams(query_tokens):
if ngram not in self.ngrams_dict:
continue
for key, k_i, k_j in self.ngrams_dict[ngram]:
all_matches.append((i, j, key, k_i, k_j))
non_overlaps = []
for q_i, q_j, key, k_i, k_j in sorted(all_matches, key=lambda x: x[1] - x[0], reverse=True):
is_overlap = False
for q_i2, q_j2, key2, k_i2, k_j2 in non_overlaps:
if key == key2 and q_i2 <= q_i and q_j2 >= q_j:
is_overlap = True
break
if not is_overlap:
non_overlaps.append((q_i, q_j, key, k_i, k_j))
return non_overlaps
def merge_tokens_with_index_mappings(tokens: List[str]):
tokens_str = ''
start_map, end_map = {}, {}
for i, token in enumerate(tokens):
if i > 0:
tokens_str += ' '
start_map[len(tokens_str)] = i
tokens_str += token
end_map[len(tokens_str) - 1] = i
return tokens_str, (start_map, end_map)
def parse_number_value(value: str):
try:
i = int(value)
return i
except:
pass
try:
f = float(value)
return f
except:
pass
raise ValueError("{} can't be parsed to number".format(value))
def recognize_numbers(tokens: List[str], enable_ordinal: bool = True, culture: Culture = Culture.English):
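    # Run the recognizers over the space-joined token string, then map character offsets back to token indices.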
numbers = []
tokens_str, (start_map, end_map) = merge_tokens_with_index_mappings(tokens)
results = Recognizers.recognize_number(tokens_str, culture)
if enable_ordinal:
results += Recognizers.recognize_ordinal(tokens_str, culture)
for result in results:
if result.start not in start_map or result.end not in end_map:
continue
start, end = start_map[result.start], end_map[result.end]
if result.resolution is not None and 'value' in result.resolution:
value = parse_number_value(result.resolution['value'])
            if result.type_name == 'ordinal' and value == 0:
continue
numbers.append((start, end, value, result.type_name))
return numbers
@dataclass
class ValueMatch:
column: str
value: str
start: int
end: int
score: float
label: bool = False
def __str__(self) -> str:
return f'{self.value}[{self.start}:{self.end}]/{self.column}/{self.score:.3f}/{self.label}'
def to_json(self):
return self.__dict__
@classmethod
def from_json(cls, obj: Dict):
return ValueMatch(**obj)
def lookup_values(input) -> List[ValueMatch]:
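    # Fuzzily match every question n-gram (up to 7 tokens) against a column's values using Levenshtein ratio.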
query_tokens, column, values, threshold = input
column_matches = []
for value in values:
for i, j, ngram in permutate_ngrams(query_tokens):
if j - i > 6:
continue
score = ratio(value.replace(" ", ""), ngram.replace(" ", "").lower())
if score < threshold:
continue
column_matches.append(ValueMatch(column=column, value='{}'.format(value), start=i, end=j, score=score))
return list(sorted(column_matches, key=lambda x: x.score, reverse=True))
class ValueMatcher:
def __init__(self, columns: List[Tuple[str, str, List[object]]]):
self.columns = []
for column_name, column_type, values in columns:
distinct_values = list(set([str(x).lower().strip() for x in values])) # [:200]
self.columns.append((column_name, column_type, distinct_values))
def match_text_values(self, query_tokens: List[str], threshold: float, top_k: int = 3) -> List[ValueMatch]:
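        # Match text columns in parallel, keep the top-k hits per column, then drop overlapping lower-scored spans.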
ngram_matches = []
pool = Pool(16)
inputs = [(query_tokens, column, values, threshold) for column, data_type, values in self.columns if
data_type == 'text']
column_ngram_matches = pool.map(lookup_values, inputs)
pool.close()
pool.join()
for matches in column_ngram_matches:
ngram_matches += matches[:top_k]
non_overlap_matches: List[ValueMatch] = []
for match in sorted(ngram_matches, key=lambda x: x.score * 1000 + x.end - x.start, reverse=True):
is_overlap = False
for match2 in non_overlap_matches:
if not (match.start > match2.end or match.end < match2.start or (match2.score - match.score) < 1e-2):
is_overlap = True
break
if match2.start <= match.start and match2.end >= match.end and (
(match2.end - match2.start) > (match.end - match.start)):
is_overlap = True
break
if not is_overlap:
non_overlap_matches.append(match)
        # Match single tokens wrapped in quotes against every text column with a low default score.
for i, token in enumerate(query_tokens):
is_string = False
if i - 1 >= 0 and query_tokens[i - 1] == '\'' \
and i + 1 < len(query_tokens) and query_tokens[i + 1] == '\'':
is_string = True
if not is_string:
continue
for column, data_type, _ in self.columns:
if data_type == 'text':
non_overlap_matches.append(
ValueMatch(column=column, value=token.lower(), start=i, end=i, score=0.5))
return non_overlap_matches
def match_number_values(self, query_tokens: List[str]) -> List[ValueMatch]:
numbers = recognize_numbers(query_tokens, False, Culture.English)
matches = []
for (start, end, value, _) in numbers:
matches.append(ValueMatch(column='*', value=value, start=start, end=end, score=1.0))
for column, data_type, _ in self.columns:
if data_type not in ['number', 'int', 'real']:
continue
matches.append(ValueMatch(column=column, value=value, start=start, end=end, score=1.0))
return matches
def match(self, query_tokens: List[str], threshold: float, top_k: int = 3) -> List[ValueMatch]:
all_matches = []
text_matches = self.match_text_values(query_tokens, threshold, top_k)
all_matches = text_matches
number_matches = self.match_number_values(query_tokens)
for num_match in number_matches:
is_overlap = False
for match in text_matches:
if match.start <= num_match.start and match.end >= num_match.end and (
(match.end - match.start) > (num_match.end - num_match.start)):
is_overlap = True
break
if not is_overlap:
all_matches.append(num_match)
return all_matches
|
ContextualSP/awakening_latent_grounding/utils/nlp_utils.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/nlp_utils.py",
"repo_id": "ContextualSP",
"token_count": 4625
}
| 230 |
import torch
from torch import nn
class BinaryTreeLstmCell(nn.Module):
def __init__(self, hidden_dim, dropout_prob=None):
super().__init__()
self.h_dim = hidden_dim
self.linear = nn.Linear(in_features=2 * self.h_dim, out_features=5 * self.h_dim)
if dropout_prob is not None:
self.dropout = nn.Dropout(dropout_prob)
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
# add some positive bias for the forget gates [b_g, b_i, b_f, b_f, b_o] = [0, 0, 1, 1, 0]
nn.init.constant_(self.linear.bias, val=0)
nn.init.constant_(self.linear.bias[2 * self.h_dim:4 * self.h_dim], val=1)
def forward(self, h_l, c_l, h_r, c_r):
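        # Concatenate the two children's hidden states and compute all five gates (g, i, f_l, f_r, o) with a single projection.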
h_lr = torch.cat([h_l, h_r], dim=-1)
g, i, f_l, f_r, o = self.linear(h_lr).chunk(chunks=5, dim=-1)
g, i, f_l, f_r, o = g.tanh_(), i.sigmoid_(), f_l.sigmoid_(), f_r.sigmoid_(), o.sigmoid_()
if hasattr(self, "dropout"):
c = i * self.dropout(g) + f_l * c_l + f_r * c_r
else:
c = i * g + f_l * c_l + f_r * c_r
h = o * c.tanh_()
return h, c
|
ContextualSP/compositional_generalization/modules/BinaryTreeLstmCell.py/0
|
{
"file_path": "ContextualSP/compositional_generalization/modules/BinaryTreeLstmCell.py",
"repo_id": "ContextualSP",
"token_count": 622
}
| 231 |
{
"random_seed": 42,
"numpy_seed": 42,
"pytorch_seed": 42,
"dataset_reader": {
"type": "rewrite",
"lazy": false,
"super_mode": "before",
"joint_encoding": true,
"extra_stop_words": ["的", "是", "我", "了", "去"]
},
"train_data_path": "D:\\users\\v-qianl\\Unified-FollowUp\\dataset\\MultiDialogue\\train.txt",
"validation_data_path": "D:\\users\\v-qianl\\Unified-FollowUp\\dataset\\MultiDialogue\\valid.txt",
"model": {
"type": "rewrite",
"word_embedder": {
"tokens": {
"type": "embedding",
"embedding_dim": 100,
"trainable": true,
"padding_index": 0
}
},
"text_encoder": {
"type": "lstm",
"input_size": 100,
"hidden_size": 200,
"bidirectional": true,
"num_layers": 1
},
"inp_drop_rate": 0.2,
"out_drop_rate": 0.2,
"feature_sel": 83,
"loss_weights": [0.2, 0.2, 0.6],
"super_mode": "before",
"unet_down_channel": 64
},
"iterator": {
"type": "basic",
"batch_size": 16
},
"validation_iterator": {
"type": "basic",
"batch_size": 16
},
"trainer": {
"num_epochs": 100,
"cuda_device": 0,
"patience": 10,
"validation_metric": "+F3",
"optimizer": {
"type": "adam",
"lr": 1e-3
},
"num_serialized_models_to_keep": 10,
"should_log_learning_rate": true
}
}
|
ContextualSP/incomplete_utterance_rewriting/configs/multi.jsonnet/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/configs/multi.jsonnet",
"repo_id": "ContextualSP",
"token_count": 618
}
| 232 |
#!/usr/bin/env bash
export model_file=../checkpoints/run_task
export config_file=../configs/task.jsonnet
export train_data_path=../dataset/Task/train.txt
export validation_data_path=../dataset/Task/dev.txt
export pretrained_file=../glove/glove.6B.100d.txt
export seed=1
allennlp train -s ${model_file} ${config_file} \
--include-package data_reader \
--include-package model \
-o "{\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\", \"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.word_embedder.tokens.pretrained_file\":\"${pretrained_file}\"}"
|
ContextualSP/incomplete_utterance_rewriting/src/train_task.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/train_task.sh",
"repo_id": "ContextualSP",
"token_count": 228
}
| 233 |
# coding=utf8
from collections import deque, namedtuple
# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')
def make_edge(start, end, cost=1):
return Edge(start, end, cost)
class Graph:
def __init__(self, edges):
# let's check that the data is right
wrong_edges = [i for i in edges if len(i) not in [2, 3]]
if wrong_edges:
raise ValueError('Wrong edges data: {}'.format(wrong_edges))
self.edges = [make_edge(*edge) for edge in edges]
@property
def vertices(self):
res = []
for edge in self.edges:
if edge.start not in res:
res.append(edge.start)
if edge.end not in res:
res.append(edge.end)
return res
def get_node_pairs(self, n1, n2, both_ends=True):
if both_ends:
node_pairs = [[n1, n2], [n2, n1]]
else:
node_pairs = [[n1, n2]]
return node_pairs
def remove_edge(self, n1, n2, both_ends=True):
node_pairs = self.get_node_pairs(n1, n2, both_ends)
edges = self.edges[:]
for edge in edges:
if [edge.start, edge.end] in node_pairs:
self.edges.remove(edge)
def add_edge(self, n1, n2, cost=1, both_ends=True):
node_pairs = self.get_node_pairs(n1, n2, both_ends)
for edge in self.edges:
if [edge.start, edge.end] in node_pairs:
                raise ValueError('Edge {} {} already exists'.format(n1, n2))
self.edges.append(Edge(start=n1, end=n2, cost=cost))
if both_ends:
self.edges.append(Edge(start=n2, end=n1, cost=cost))
@property
def neighbours(self):
neighbours = {vertex: set() for vertex in self.vertices}
for edge in self.edges:
neighbours[edge.start].add((edge.end, edge.cost))
return neighbours
def dijkstra(self, source, dest):
assert source in self.vertices, 'Such source node doesn\'t exist'
        assert dest in self.vertices, 'Such dest node doesn\'t exist'
# 1. Mark all nodes unvisited and store them.
# 2. Set the distance to zero for our initial node
# and to infinity for other nodes.
distances = {vertex: inf for vertex in self.vertices}
previous_vertices = {
vertex: None for vertex in self.vertices
}
distances[source] = 0
vertices = self.vertices.copy()
while vertices:
# 3. Select the unvisited node with the smallest distance,
# it's current node now.
current_vertex = min(
vertices, key=lambda vertex: distances[vertex])
# 6. Stop, if the smallest distance
# among the unvisited nodes is infinity.
if distances[current_vertex] == inf:
break
# 4. Find unvisited neighbors for the current node
# and calculate their distances through the current node.
for neighbour, cost in self.neighbours[current_vertex]:
alternative_route = distances[current_vertex] + cost
# Compare the newly calculated distance to the assigned
# and save the smaller one.
if alternative_route < distances[neighbour]:
distances[neighbour] = alternative_route
previous_vertices[neighbour] = current_vertex
# 5. Mark the current node as visited
# and remove it from the unvisited set.
vertices.remove(current_vertex)
path, current_vertex = deque(), dest
while previous_vertices[current_vertex] is not None:
path.appendleft(current_vertex)
current_vertex = previous_vertices[current_vertex]
if path:
path.appendleft(current_vertex)
return path
|
ContextualSP/interactive_text_to_sql/src/context/graph.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/context/graph.py",
"repo_id": "ContextualSP",
"token_count": 1776
}
| 234 |
# coding: utf-8
# from pattern.en import lemma
import spacy
sp_english = spacy.load('en_core_web_sm')
STOP_WORD_LIST = [_.strip() for _ in open('data/common/stop_words.txt', 'r', encoding='utf-8').readlines() if _[0] != '#']
TEMPLATE_KEYWORDS = ['find', 'out', 'the', 'common', 'part', 'of', 'set', 'and', 'everyone', 'in', 'but', 'not',
'satisfying', 'between', 'like'] + STOP_WORD_LIST
class AverageMeter(object):
def __init__(self):
self.reset()
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def lemma_token(token):
token = token.lower()
# token_lemma = lemma(token)
token_lemma = sp_english(token)[0].lemma_
if token_lemma:
return token_lemma.lower()
else:
return token.lower()
|
ContextualSP/interactive_text_to_sql/src/utils/utils.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/utils.py",
"repo_id": "ContextualSP",
"token_count": 499
}
| 235 |
# import cPickle as pickle
import pickle
import codecs
import contextlib
import gzip
import json
import os
import random
import shutil
import subprocess
import sys
import time
from queue import Queue, Empty
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping
from os.path import join
from threading import Thread
import jsonpickle
import numpy as np
from fabric.api import local, settings
from fabric.context_managers import hide
class MultiStream(object):
def __init__(self, *streams):
self.streams = streams
def write(self, msg):
for s in self.streams:
s.write(msg)
s.flush()
def flush(self):
for s in self.streams:
s.flush()
class redirect_stream(object):
"""Inside this context manager, inputs to a target stream are redirected to a replacement stream instead."""
def __init__(self, replacement):
"""Redirect.
Args:
replacement: replace the target stream with this stream.
"""
self._replacement = replacement
@property
def target_stream(self):
"""Get the target stream."""
raise NotImplementedError
@target_stream.setter
def target_stream(self, s):
"""Set the target stream."""
raise NotImplementedError
def __enter__(self):
self._original = self.target_stream # save the original stream
self.target_stream = self._replacement
def __exit__(self, exc_type, exc_value, traceback):
self._replacement.flush()
self.target_stream = self._original # put the original stream back
class redirect_stdout(redirect_stream):
@property
def target_stream(self):
return sys.stdout
@target_stream.setter
def target_stream(self, s):
sys.stdout = s
class redirect_stderr(redirect_stream):
@property
def target_stream(self):
return sys.stderr
@target_stream.setter
def target_stream(self, s):
sys.stderr = s
class save_stdout(object):
def __init__(self, save_dir):
makedirs(save_dir)
save_file = lambda filename: open(join(save_dir, filename), 'a')
self._f_out = save_file('stdout.txt')
self._f_err = save_file('stderr.txt')
self._redirects = [redirect_stdout(MultiStream(self._f_out, sys.stdout)),
redirect_stderr(MultiStream(self._f_err, sys.stderr))]
def __enter__(self):
for r in self._redirects:
r.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
for r in self._redirects:
r.__exit__(exc_type, exc_val, exc_tb)
self._f_out.close()
self._f_err.close()
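# Minimal usage sketch for save_stdout (the directory below is hypothetical):
# inside the context, anything printed to stdout/stderr is shown as usual and
# is also appended to stdout.txt / stderr.txt under the given directory.
#
#   with save_stdout('/tmp/run_logs'):
#       print('hello')   # printed to the console and appended to /tmp/run_logs/stdout.txt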
def utfopen(path, mode):
"""Open a file with UTF-8 encoding."""
return codecs.open(path, mode, encoding='utf-8')
def save(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load(path):
with open(path, 'rb') as f:
return pickle.load(f)
def work_in_sandbox(directory):
"""Create a sandbox directory, and set cwd to sandbox.
Deletes any existing sandbox directory!
Args:
directory: directory in which to put sandbox directory
"""
os.chdir(directory)
p = 'sandbox'
if os.path.exists(p): # remove if already exists
shutil.rmtree(p)
os.makedirs(p)
os.chdir(p)
print(os.getcwd())
def makedirs(directory):
"""If directory does not exist, make it.
Args:
directory (str): a path to a directory. Cannot be the empty path.
"""
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
def reset_state():
# Reset all random seeds, as well as TensorFlow default graph
random.seed(0)
np.random.seed(0)
import tensorflow as tf
from tensorflow.python.framework import ops
tf.set_random_seed(0)
ops.reset_default_graph()
class EmptyFile(object):
"""Delivers a never-ending stream of empty strings."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return self
def __next__(self):
return ''
def read_files(*file_paths):
files = []
for i, p in enumerate(file_paths):
if p:
files.append(open(p, mode="r"))
print('Opened:', p)
else:
files.append(EmptyFile())
print('WARNING: no path provided for file {} in list.'.format(i))
# contextlib.nested was removed in Python 3; ExitStack manages the open files instead.
with contextlib.ExitStack() as stack:
entered_files = [stack.enter_context(f) for f in files]
for lines in zip(*entered_files):
yield lines
class MultiFileWriter(object):
def __init__(self, *file_paths):
self.file_paths = file_paths
def __enter__(self):
self.files = [open(fp, 'w') for fp in self.file_paths]
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for file in self.files:
file.__exit__(exc_type, exc_val, exc_tb)
def write(self, lines):
assert len(lines) == len(self.files)
for f, line in zip(self.files, lines):
f.write(line)
def open_or_create(path, *args, **kwargs):
"""Open a file or create it, if it does not exist.
Args:
path (str): path to file
gz (bool): whether to use GZIP or not. Defaults to False.
Returns:
file object
"""
gz = kwargs.pop('gz', False)
open_file = gzip.open if gz else open
if not os.path.isfile(path):
with open_file(path, 'w'):
pass # create file
return open_file(path, *args, **kwargs)
class Process(object):
def __init__(self, cmd, cwd=None):
self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=cwd, universal_newlines=True)  # text mode, so read() yields str and the '' sentinel below works
def read(self, timeout=float('inf')):
def enqueue_output(out, queue):
for c in iter(lambda: out.read(1), ''):
queue.put(c)
q = Queue()
t = Thread(target=enqueue_output, args=(self._proc.stdout, q))
t.daemon = True # thread dies with the program
t.start()
last_yield_time = time.time()
while True:
try:
yield q.get(timeout=0.001)
last_yield_time = time.time()
except Empty:
# if 1 millisecond passes without new item on queue...
if not self.alive:
# break if process has died
break
if time.time() - last_yield_time > timeout:
# break if time is up
break
def read_lines(self, timeout=float('inf')):
chars = []
for c in self.read(timeout):
chars.append(c)
if c == '\n':
yield ''.join(chars[:-1])
chars = []
@property
def pid(self):
return self._proc.pid
@property
def alive(self):
code = self._proc.poll()
return code is None
def terminate(self):
return self._proc.terminate()
def wait(self):
return self._proc.wait()
def shell(cmd, cwd=None, verbose=False, debug=False):
"""Execute a command just like you would at the command line.
Attempts to print output from the command with as little buffering as possible.
http://stackoverflow.com/questions/18421757/live-output-from-subprocess-command
Args:
cmd (str): command to execute, just as you would enter at the command line
cwd (str): current working directory to execute the command
verbose (bool): whether to print out the results of the command
debug (bool): if True, command is not actually executed. Typically used with verbose=True.
Returns:
all output from the command
"""
if verbose:
print(cmd)
if debug:
return
output = []
process = Process(cmd, cwd)
for c in process.read():
output.append(c)
if verbose:
sys.stdout.write(c)
sys.stdout.flush()
status = process.wait()
if status != 0:
raise RuntimeError('Error, exit code: {}'.format(status))
# TODO: make sure we get all output
return ''.join(output)
def local_bash(command, capture=False):
"""Just like fabric.api.local, but with shell='/bin/bash'."""
return local(command, capture, shell='/bin/bash')
class JSONPicklable(object, metaclass=ABCMeta):
"""Uses jsonpickle to convert any picklable object to and from JSON."""
@abstractmethod
def __getstate__(self):
"""Return a variable with enough information to reconstruct the object."""
pass
@abstractmethod
def __setstate__(self, state):
"""Use the variable from __getstate__ to restore the object.
Note that pickle created this object without calling __init__.
So, a common strategy is to manually call self.__init__(...) inside this function, using the information
provided by `state`.
"""
pass
def to_json_str(self):
return jsonpickle.encode(self)
@classmethod
def from_json_str(cls, s):
return jsonpickle.decode(s)
def to_json(self):
"""Use jsonpickle to convert this object to JSON."""
s = self.to_json_str()
d = json.loads(s) # convert str to dict
return d
@classmethod
def from_json(cls, d):
"""Use jsonpickle to convert JSON into an object."""
s = json.dumps(d)
obj = cls.from_json_str(s)
return obj
def to_file(self, path):
with open(path, 'w') as f:
json.dump(self.to_json(), f)
@classmethod
def from_file(cls, path):
with open(path, 'r') as f:
d = json.load(f)
return cls.from_json(d)
class InitPicklable(object):
def __new__(cls, *args, **kwargs):
obj = super(InitPicklable, cls).__new__(cls)
obj.__initargs = args, kwargs
return obj
def __getstate__(self):
return self.__initargs
def __setstate__(self, state):
args, kwargs = state
self.__init__(*args, **kwargs)
def sub_dirs(root_dir):
"""Return a list of all sub-directory paths.
Example:
>> root_dir = '/Users/Kelvin/data'
>> sub_dirs(root_dir)
['/Users/Kelvin/data/a', '/Users/Kelvin/data/b']
"""
dir_paths = []
for path in os.listdir(root_dir):
full_path = join(root_dir, path)
if os.path.isdir(full_path):
dir_paths.append(full_path)
return dir_paths
class IntegerDirectories(Mapping):
"""Keep track of directories with names of the form "{integer}_{something}" or just "{integer}".
Used for organizing experiment directories.
"""
def __init__(self, root_dir):
self.root_dir = root_dir
makedirs(root_dir)
@property
def _ints_to_paths(self):
ints_to_paths = {}
for p in sub_dirs(self.root_dir):
name = os.path.basename(p)
try:
i = int(name.split('_')[0])
if i in ints_to_paths:
raise IOError("Multiple directories with the same integer prefix: {} and {}".format(
ints_to_paths[i], p))
ints_to_paths[i] = p
except ValueError:
# the first element was not an integer
pass
# put into an ordered dict
ordered = OrderedDict()
for i in sorted(ints_to_paths):
ordered[i] = ints_to_paths[i]
return ordered
def __len__(self):
return len(self._ints_to_paths)
@property
def largest_int(self):
"""Largest int among the integer directories."""
if len(self._ints_to_paths) == 0:
return None
return max(self._ints_to_paths)
def new_dir(self, name=None):
"""Create a new directory and return its path."""
if self.largest_int is None:
idx = 0
else:
idx = self.largest_int + 1
path = join(self.root_dir, str(idx))
if name:
path = '{}_{}'.format(path, name) # add name as suffix
makedirs(path)
return path
def __getitem__(self, i):
"""Get the path to experiment i.
Raises:
KeyError, if experiment folder does not exist.
"""
if i not in self._ints_to_paths:
raise KeyError("Experiment #{} not found".format(i))
return self._ints_to_paths[i]
def __iter__(self):
return iter(self._ints_to_paths)
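# Minimal usage sketch for IntegerDirectories (the root path is hypothetical):
# each new_dir() call creates the next integer-prefixed experiment directory.
#
#   exps = IntegerDirectories('/tmp/experiments')
#   first = exps.new_dir('baseline')   # creates /tmp/experiments/0_baseline
#   second = exps.new_dir()            # creates /tmp/experiments/1
#   exps[0]                            # -> '/tmp/experiments/0_baseline'
#   exps.largest_int                   # -> 1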
def rsync(src_path, dest_path, src_host=None, dest_host=None, delete=False):
"""Sync a file/directory from one machine to another machine.
Args:
src_path (str): a file or directory on the source machine.
dest_path (str): the corresponding file or directory on the target machine.
src_host (str): the address of the source machine. Default is local machine.
dest_host (str): the address of the target machine. Default is local machine.
delete (bool): default is False. If True, deletes any extraneous files at the destination not
present at the source!
Options used:
-r: recurse into directories
-l: copy symlinks as symlinks
-v: verbose
-z: compress files during transfer
-t: preserve times (needed for rsync to recognize that files haven't changed since last update!)
--delete: delete any extraneous files at the destination
--progress: show progress
"""
if os.path.isdir(src_path):
if not src_path.endswith('/'):
src_path += '/' # add missing trailing slash
def format_address(host, path):
if host is None:
return path
else:
return '{}:{}'.format(host, path)
cmds = ["rsync", "-rlvzt", "--progress"]
if delete:
cmds.append('--delete')
cmds.append(format_address(src_host, src_path))
cmds.append(format_address(dest_host, dest_path))
cmd = ' '.join(cmds)
local(cmd)
class Tmux(object):
def __init__(self, name, cwd=None):
"""Create a tmux session.
Args:
name (str): name of the new session
cwd (str): initial directory of the session
Options used:
-d: do not attach to the new session
-s: specify a name for the session
"""
self.name = name
with settings(hide('warnings'), warn_only=True):
result = local("tmux new -d -s {}".format(name)) # start tmux session
if result.failed:
raise TmuxSessionExists()
if cwd is None:
cwd = os.getcwd()
# move to current directory
self.run("cd {}".format(cwd))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run(self, command):
"""Run command in tmux session.
Assume that the session has only one window.
Args:
command (str)
"""
local('tmux send -t {} "{}" Enter'.format(self.name, command))
def close(self):
local("tmux kill-session -t {}".format(self.name))
class TmuxSessionExists(Exception):
pass
def tunnel(local_port, host, target, target_port, tmux_name, autossh_port=20000):
"""Make a port on a target machine appear as if it is a port on our local machine.
Uses autossh to keep the tunnel open even with interruptions.
Runs autossh in a new tmux session, so that it can be monitored.
Args:
local_port (int): a port on this machine, e.g. 18888
host (str): the machine that will be used to create the SSH tunnel, e.g. `[email protected]` or just `jamie`
if we have that alias configured in ~/.ssh/config.
target (str): the address of the target machine, e.g. `[email protected]` or just `john11`. The address
should be RELATIVE to the host machine.
target_port (int): port on the target machine, e.g. 8888
tmux_name (str): name of the tmux session that will be running the autossh command.
autossh_port (int): local port used by autossh to monitor the connection. Cannot be used by more than one
autossh process at a time!
"""
command = "autossh -M {} -N -n -T -L {}:{}:{} {}".format(autossh_port, local_port, target, target_port, host)
tmux = Tmux(tmux_name)
tmux.run(command)
class Workspace(object):
"""Manage paths underneath a top-level root directory.
Paths are registered with this Workspace. An IOError is thrown if the path has already been registered before.
"""
def __init__(self, root):
"""Create a Workspace.
Args:
root (str): absolute path of the top-level directory.
"""
self._root = root
makedirs(root)
self._paths = set()
@property
def root(self):
return self._root
def _add(self, name, relative_path):
"""Register a path.
Args:
name (str): short name to reference the path
relative_path (str): a relative path, relative to the workspace root.
Returns:
self
"""
full_path = join(self._root, relative_path)
if hasattr(self, name):
raise IOError('Name already registered: {}'.format(name))
if full_path in self._paths:
raise IOError('Path already registered: {}'.format(relative_path))
setattr(self, name, full_path)
def add_dir(self, name, relative_path):
self._add(name, relative_path)
makedirs(getattr(self, name))
def add_file(self, name, relative_path):
self._add(name, relative_path)
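# Minimal usage sketch for Workspace (paths are hypothetical): registered names
# become attributes holding absolute paths under the root; re-registering a name
# or relative path raises IOError.
#
#   ws = Workspace('/tmp/my_run')
#   ws.add_dir('checkpoints', 'ckpts')   # creates /tmp/my_run/ckpts
#   ws.add_file('log', 'log.txt')        # registers /tmp/my_run/log.txt (file not created)
#   print(ws.checkpoints, ws.log)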
|
ContextualSP/lemon/executor/gtd/io.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/io.py",
"repo_id": "ContextualSP",
"token_count": 7652
}
| 236 |
from abc import ABCMeta, abstractmethod
from collections.abc import Sequence
import logging
import os
import random
from dependency.data_directory import DataDirectory
from gtd.utils import random_seed
class Dataset(Sequence, metaclass=ABCMeta):
"""Encapsulates an entire dataset or fetches the data if necessary."""
def __init__(self):
self._examples = []
def __getitem__(self, i):
return self._examples[i]
def __len__(self):
return len(self._examples)
class DatasetFromFile(Dataset):
"""Dataset that is loaded from a file.
An ExampleFactory is used to read the file and yield Examples.
"""
# TODO: Write this to a FileSequence
def __init__(self, filenames, filename_to_examples, relative_path=True, shuffle=True):
"""Construct the dataset based on the data in the files.
Args:
filenames (unicode or list[unicode]): names of the files
filename_to_examples: a callable that takes a filename
and yields Examples
relative_path: whether to resolve the filename on DataDirectory.root
"""
self._examples = []
if isinstance(filenames, str):
filenames = [filenames]
for filename in filenames:
if relative_path:
filename = os.path.join(DataDirectory.root, filename)
self._examples.extend(filename_to_examples(filename))
if shuffle:
with random_seed(42):
random.shuffle(self._examples)
logging.info('Read {} examples ({}) from {}'.format(
len(self._examples), 'shuffled' if shuffle else 'not shuffled', filenames))
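# Minimal usage sketch (the file name and the factory below are hypothetical):
# filename_to_examples is any callable that takes a path and yields examples;
# DatasetFromFile concatenates the examples from all files and optionally
# shuffles them with a fixed seed.
#
#   def lines_to_examples(path):
#       with open(path, encoding='utf-8') as f:
#           for line in f:
#               yield line.strip()
#
#   dataset = DatasetFromFile('train.txt', lines_to_examples,
#                             relative_path=False, shuffle=False)
#   print(len(dataset), dataset[0])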
|
ContextualSP/lemon/executor/strongsup/dataset.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/dataset.py",
"repo_id": "ContextualSP",
"token_count": 664
}
| 237 |
import operator
import os
from gtd.utils import EqualityMixin
from functools import reduce
class ExperimentType(EqualityMixin):
"""Defines the configs for an experiment
Args:
configs (list[string]): the config mixins
base (string): the base config e.g. "default-base"
"""
@classmethod
def parse_configs(cls, configs):
"""Creates a new ExperimentType object from list of configs of the
form configs/rlong/dataset-mixins/something.txt
Args:
configs (list[string]): the configs
Returns:
ExperimentType
string: the dataset
int: the seed
"""
base = base_filename(configs[0])
confs = []
seed = None
for config in configs[1:]:
if config.find("dataset-mixins") != -1:
dataset = base_filename(config)
elif config.find("seed-mixins") != -1:
seed = int(base_filename(config).replace("seed=", ""))
else:
confs.append(base_filename(config))
confs.sort()
# Default configs
experiment_type = cls(confs, base)
# Default seed
if seed is None:
seed = 0
return experiment_type, dataset, seed
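# Example (config paths are illustrative): given
#   configs = ['configs/rlong/default-base.txt',
#              'configs/rlong/dataset-mixins/alchemy.txt',
#              'configs/rlong/seed-mixins/seed=3.txt']
# parse_configs returns (ExperimentType([], 'default-base'), 'alchemy', 3).
# Without a seed mixin the seed defaults to 0, and str(experiment_type) falls
# back to the base name when there are no extra config mixins.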
def __init__(self, configs, base):
self._configs = configs
self._base = base
@property
def configs(self):
return self._configs
@property
def base(self):
return self._base
def __str__(self):
configs = '-'.join(self.configs)
if configs == "":
configs = self._base
return "{}".format(configs)
__repr__ = __str__
def __hash__(self):
return hash((tuple(self.configs), self.base))
class Entry(object):
"""A single entry in the Table. Contains results for all seeds of the
same ExperimentType
Args:
experiment_type (ExperimentType): the experiment type
"""
def __init__(self, experiment_type):
self._experiment_type = experiment_type
self._results = {} # seed -> result value
def add_seed(self, seed, result_value):
"""Adds a result value associated with this seed
Args:
seed (int)
result_value (ResultValue)
"""
if seed in self._results:
raise ValueError("Seed {} already in Entry {}".format(seed, self))
self._results[seed] = result_value
def update_seed(self, seed, result_value):
"""Updates the result value associated with this seed
Args:
seed (int)
result_value (ResultValue)
"""
self._results[seed] = result_value
def delete_seed(self, seed):
"""Deletes value associated with this seed.
Args:
seed (int)
"""
self._results.pop(seed, None)
def contains_seed(self, seed):
"""Returns True if there's a value already associated with this seed.
Args:
seed (int)
Returns:
bool
"""
return seed in self._results
def __eq__(self, other):
return self._experiment_type == other._experiment_type and \
self._results == other._results
@property
def seeds(self):
return list(self._results.keys())
@property
def experiment_type(self):
return self._experiment_type
@property
def name(self):
return str(self._experiment_type)
def get_value(self, seed):
"""Returns the ResultValue associated with this seed."""
return self._results[seed]
@property
def best(self):
"""Returns the seed and ResultValue achieving highest
result value
Returns:
seed (int)
ResultValue
"""
return max(iter(self._results.items()), key=operator.itemgetter(1))
@property
def avg(self):
"""Returns the ResultValue of the average over all seeds
Returns:
ResultValue
"""
return reduce(
operator.add, list(self._results.values())) / len(self._results)
@property
def var(self):
"""Returns the ResultValue of the var over all seeds
Returns:
ResultValue
"""
return reduce(operator.add, ((value - self.avg).squared()
for value in list(self._results.values()))) / len(self._results)
def __str__(self):
return "Entry({}: {})".format(self._experiment_type, self._results)
__repr__ = __str__
def base_filename(path):
"""Returns the filename without the extension from the path"""
return os.path.splitext(os.path.basename(path))[0]
|
ContextualSP/lemon/executor/strongsup/results/entry.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/results/entry.py",
"repo_id": "ContextualSP",
"token_count": 2047
}
| 238 |
from strongsup.world import World
from strongsup.rlong.executor import RLongExecutor
from strongsup.rlong.predicates_computer import get_predicates_computer
from strongsup.rlong.state import RLongState
class RLongWorld(World):
"""World for Alchemy, Scene, and Tangrams domains."""
def __init__(self, initial_state):
"""Create a new RLongWorld.
Args:
initial_state (RLongState)
"""
assert isinstance(initial_state, RLongState)
self._initial_state = initial_state
self._executor = RLongExecutor(initial_state)
@property
def initial_state(self):
"""Return a RLongState object."""
return self._initial_state
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.initial_state)
@property
def executor(self):
return self._executor
@property
def predicates_computer(self):
return self._PREDICATES_COMPUTER
def dump_human_readable(self, fout):
self.initial_state.dump_human_readable(fout)
class RLongAlchemyWorld(RLongWorld):
_PREDICATES_COMPUTER = get_predicates_computer('alchemy')
class RLongSceneWorld(RLongWorld):
_PREDICATES_COMPUTER = get_predicates_computer('scene')
class RLongTangramsWorld(RLongWorld):
_PREDICATES_COMPUTER = get_predicates_computer('tangrams')
class RLongUndogramsWorld(RLongWorld):
_PREDICATES_COMPUTER = get_predicates_computer('undograms')
|
ContextualSP/lemon/executor/strongsup/rlong/world.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/rlong/world.py",
"repo_id": "ContextualSP",
"token_count": 554
}
| 239 |
# import pytest
import sys
sys.path.append('../../../')
from strongsup.rlong.executor import RLongExecutor
from strongsup.rlong.predicate import RLongPredicate
from strongsup.rlong.state import \
RLongAlchemyState, RLongSceneState, RLongTangramsState, RLongUndogramsState
class RLongExecutorTester(object):
def prepare_state(self, state):
if not isinstance(state, self.STATE_CLASS):
state = self.STATE_CLASS.from_raw_string(state)
return state
def prepare_lf(self, lf):
if isinstance(lf, str):
lf = lf.split()
if not all(isinstance(x, RLongPredicate) for x in lf):
lf = [x if isinstance(x, RLongPredicate) else RLongPredicate(x)
for x in lf]
return lf
def assert_good(self, initial_state, lf, final_state):
initial_state = self.prepare_state(initial_state)
final_state = self.prepare_state(final_state)
# executor = RLongExecutor(initial_state, debug=True)
executor = RLongExecutor(initial_state, debug=False)
lf = self.prepare_lf(lf)
# print(('=' * 10, lf, '=' * 10))
# Direct execution
denotation = executor.execute(lf)
assert denotation.world_state == final_state
# Token-by-token execution
denotation = None
for x in lf:
denotation = executor.execute_predicate(x, denotation)
assert denotation.world_state == final_state
def assert_bad(self, initial_state, lf):
initial_state = self.prepare_state(initial_state)
# executor = RLongExecutor(initial_state, debug=True)
executor = RLongExecutor(initial_state, debug=False)
lf = self.prepare_lf(lf)
# print(('=' * 10, lf, '=' * 10))
# Direct execution
try:
denotation = executor.execute(lf)
assert False, 'No error: denotation = {}'.format(denotation)
except:
pass
# Token-by-token execution
denotation = None
for x in lf:
try:
denotation = executor.execute_predicate(x, denotation)
except Exception as e:
denotation = e
assert isinstance(denotation, Exception)
################################
class TestAlchemyExecutor(RLongExecutorTester):
STATE_CLASS = RLongAlchemyState
def test_simple(self):
# 1:ggg 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg throw out two units of first beaker 1:g 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg throw out fifth beaker 1:g 2:_ 3:_ 4:_ 5:_ 6 :ooo 7:gggg throw out first one 1:_ 2:_ 3:_ 4:_ 5:_ 6:ooo 7:gggg throw out orange beaker 1:_ 2:_ 3:_ 4:_ 5:_ 6:_ 7:gggg throw out one unit of green 1:_ 2:_ 3:_ 4:_ 5:_ 6:_ 7:ggg
self.assert_good(
'1:ggg 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg',
'all-objects 1 index 2 ADrain',
'1:g 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg')
self.assert_good(
'1:ggg 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg',
'g PColor 1 index 2 ADrain',
'1:g 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'r PColor 1 ADrain',
'1:ggg 2:_ 3:_ 4:_ 5:o 6:ooo 7:gggg')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects 5 index r PColor APour',
'1:ggg 2:_ 3:_ 4:ro 5:_ 6:ooo 7:gggg')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects -1 index X1/2 ADrain',
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gg')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects -1 index X1/2 ADrain all-objects -1 index 1 ADrain',
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:g')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects -1 index X1/2 ADrain -1 H1 1 ADrain',
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:g')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects -1 index X1/2 ADrain -1 H1 1 H2 -1 H0',
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:_')
self.assert_good(
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:gggg',
'all-objects -1 index X1/2 ADrain -1 H1 -1 H2 -1 H0',
'1:ggg 2:_ 3:_ 4:r 5:o 6:ooo 7:_')
class TestSceneExecutor(RLongExecutorTester):
STATE_CLASS = RLongSceneState
def test_simple(self):
# train-1100 1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo a man in a green shirt and an orange hat stands near the middle and a man in a yellow shirt and an orange hat stands on the far right 1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo a man in a red shirt and no hat enters and stands on the far left 1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:y_ 10:yo a man in a yellow shirt and no hat joins and stands next to the man in the yellow shirt and orange hat 1:r_ 2:__ 3:__ 4:__ 5:__ 6:go 7:__ 8:__ 9:y_ 10:yo the man in the yellow shirt and orange hat moves next to the man in the green shirt and orange hat, he stands on the right 1:r_ 2:__ 3:__ 4:__ 5:__ 6:go 7:yo 8:__ 9:y_ 10:__ a man in a green shirt and no hat joins and stands next to the man in the green shirt and orange hat 1:r_ 2:__ 3:__ 4:__ 5:g_ 6:go 7:yo 8:__ 9:y_ 10:__
# Well the sentences were really wrong ...
self.assert_good(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'1 r e ACreate',
'1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo')
self.assert_bad(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'7 r e ACreate')
self.assert_good(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'1 r e ACreate 9 y e ACreate',
'1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:y_ 10:yo')
self.assert_good(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'1 r e ACreate y o DShirtHat PLeft y e ACreate',
'1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:y_ 10:yo')
self.assert_good(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'1 r e ACreate y o DShirtHat PLeft y e ACreate ' +
'g PShirt g PShirt PLeft AMove',
'1:r_ 2:__ 3:__ 4:__ 5:__ 6:go 7:__ 8:__ 9:y_ 10:yo')
self.assert_good(
'1:__ 2:__ 3:__ 4:__ 5:__ 6:__ 7:go 8:__ 9:__ 10:yo',
'1 r e ACreate y o DShirtHat PLeft y e ACreate ' +
'g PShirt g PShirt PLeft AMove ' +
'all-objects 2 index ALeave',
'1:r_ 2:__ 3:__ 4:__ 5:__ 6:__ 7:__ 8:__ 9:y_ 10:yo')
# train-1101 1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__ the person in an orange hat moves to the left of the person in a red hat 1:__ 2:__ 3:bo 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__ he then disappears 1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__ then a person in orange appears on the far right 1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:o_ he then disappears 1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__ the person in blue in a red hat moves to the far left 1:br 2:__ 3:__ 4:__ 5:__ 6:__ 7:__ 8:__ 9:__ 10:__
self.assert_good(
'1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__',
'o PHat r PHat PLeft AMove',
'1:__ 2:__ 3:bo 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__')
self.assert_good(
'1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__',
'o PHat r PHat PLeft AMove -1 H1 ALeave',
'1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__')
self.assert_good(
'1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__',
'o PHat r PHat PLeft AMove -1 H1 ALeave -1 o e ACreate',
'1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:o_')
self.assert_good(
'1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__',
'o PHat r PHat PLeft AMove -1 H1 ALeave -1 o e ACreate ' +
'-1 H1 ALeave',
'1:__ 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__')
self.assert_good(
'1:bo 2:__ 3:__ 4:br 5:__ 6:__ 7:__ 8:__ 9:__ 10:__',
'o PHat r PHat PLeft AMove -1 H1 ALeave -1 o e ACreate ' +
'-1 H1 ALeave b r DShirtHat 1 AMove',
'1:br 2:__ 3:__ 4:__ 5:__ 6:__ 7:__ 8:__ 9:__ 10:__')
class TestTangramsExecutor(RLongExecutorTester):
STATE_CLASS = RLongTangramsState
def test_simple(self):
# train-437 1:2 2:1 3:4 4:0 5:3 delete the second object from the left 1:2 2:4 3:0 4:3 delete the leftmost object 1:4 2:0 3:3 swap the leftmost and the rightmost objects 1:3 2:0 3:4 swap them again 1:4 2:0 3:3 add back the object we removed on step 1 1:1 2:4 3:0 4:3
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove',
'1:2 2:4 3:0 4:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove',
'1:4 2:0 3:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap',
'1:3 2:0 3:4')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap ' +
'-1 H1 -1 H2 ASwap',
'1:4 2:0 3:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap ' +
'-1 H1 -1 H2 ASwap 1 1 H1 AAdd',
'1:1 2:4 3:0 4:3')
# train-438 1:0 2:2 3:4 4:3 5:1 swap the second and third figures 1:0 2:4 3:2 4:3 5:1 remove the second figure 1:0 2:2 3:3 4:1 swap the second and third figures 1:0 2:3 3:2 4:1 remove the third figure 1:0 2:3 3:1 add back the figure removed in step 2, and place in the third space 1:0 2:3 3:4 4:1
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap',
'1:0 2:4 3:2 4:3 5:1')
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 2 index ARemove',
'1:0 2:2 3:3 4:1')
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 2 index ARemove ' +
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 3 index ARemove ' +
'3 2 H1 AAdd',
'1:0 2:3 3:4 4:1')
class TestUndogramsExecutor(RLongExecutorTester):
STATE_CLASS = RLongUndogramsState
def test_simple(self):
# train-437 1:2 2:1 3:4 4:0 5:3 delete the second object from the left 1:2 2:4 3:0 4:3 delete the leftmost object 1:4 2:0 3:3 swap the leftmost and the rightmost objects 1:3 2:0 3:4 swap them again 1:4 2:0 3:3 add back the object we removed on step 1 1:1 2:4 3:0 4:3
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove',
'1:2 2:4 3:0 4:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove',
'1:4 2:0 3:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap',
'1:3 2:0 3:4')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap ' +
'-1 H1 -1 H2 ASwap',
'1:4 2:0 3:3')
self.assert_good(
'1:2 2:1 3:4 4:0 5:3',
'all-objects 2 index ARemove all-objects 1 index ARemove ' +
'all-objects 1 index all-objects -1 index ASwap ' +
'-1 H1 -1 H2 ASwap 1 1 H2 AAdd',
'1:1 2:4 3:0 4:3')
# train-438 1:0 2:2 3:4 4:3 5:1 swap the second and third figures 1:0 2:4 3:2 4:3 5:1 remove the second figure 1:0 2:2 3:3 4:1 swap the second and third figures 1:0 2:3 3:2 4:1 remove the third figure 1:0 2:3 3:1 add back the figure removed in step 2, and place in the third space 1:0 2:3 3:4 4:1
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap',
'1:0 2:4 3:2 4:3 5:1')
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 2 index ARemove',
'1:0 2:2 3:3 4:1')
self.assert_good(
'1:0 2:2 3:4 4:3 5:1',
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 2 index ARemove ' +
'all-objects 2 index all-objects 3 index ASwap ' +
'all-objects 3 index ARemove ' +
'3 2 H2 AAdd',
'1:0 2:3 3:4 4:1')
def test_undo(self):
# train-440 1:2 2:1 3:3 4:0 5:4 delete the rightmost figure 1:2 2:1 3:3 4:0 undo step 1 1:2 2:1 3:3 4:0 5:4 delete the 1st figure 1:1 2:3 3:0 4:4 swap the 1st and 3rd figure 1:0 2:3 3:1 4:4 undo step 4 1:1 2:3 3:0 4:4
self.assert_good(
'1:2 2:1 3:3 4:0 5:4',
'all-objects -1 index ARemove',
'1:2 2:1 3:3 4:0')
self.assert_good(
'1:2 2:1 3:3 4:0 5:4',
'all-objects -1 index ARemove ' +
'1 H1 1 H2 1 HUndo',
'1:2 2:1 3:3 4:0 5:4')
self.assert_good(
'1:2 2:1 3:3 4:0 5:4',
'all-objects -1 index ARemove ' +
'1 H1 1 H2 1 HUndo ' +
'all-objects 1 index ARemove ' +
'all-objects 1 index all-objects 3 index ASwap ' +
'4 H1 4 H2 4 HUndo',
'1:1 2:3 3:0 4:4')
if __name__ == '__main__':
# tester = TestAlchemyExecutor()
# tester.test_simple()
# tester = TestSceneExecutor()
# tester.test_simple()
# tester = TestTangramsExecutor()
# tester.test_simple()
tester = TestUndogramsExecutor()
# tester.test_simple()
tester.test_undo()
|
ContextualSP/lemon/executor/strongsup/tests/rlong/test_executor.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/rlong/test_executor.py",
"repo_id": "ContextualSP",
"token_count": 8292
}
| 240 |
# Value interface
from abc import ABCMeta, abstractmethod
class Value(object, metaclass=ABCMeta):
"""A value represents an item in either a denotation (gold or predicted)"""
@abstractmethod
def match(self, other):
"""Return True if the value matches the other value based on the
official criteria.
Args:
other (Value)
Returns:
a boolean
"""
pass
def train_match(self, other):
"""Return a boolean of whether self and other are considered
equal at train time. This can be used to encourage the model to
predict values with the right type.
The default is to use match.
Args:
other: Value
"""
return self.match(other)
def check_denotation(target_values, predicted_values):
"""Return True if the predicted denotation is correct.
Args:
target_values (list[Value] or set[Value])
predicted_values (list[Value] or set[Value])
Returns:
bool
"""
if isinstance(predicted_values, Exception):
# the executor can return Exceptions as the denotation, if the logical form does not make sense
return False
# Check size
if len(target_values) != len(predicted_values):
return False
# Check items
for target in target_values:
if not any(target.match(pred) for pred in predicted_values):
return False
return True
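# Minimal usage sketch with a hypothetical Value subclass: check_denotation
# requires the two collections to have equal size and every target value to
# match some predicted value.
#
#   class StringValue(Value):
#       def __init__(self, s):
#           self.s = s
#       def match(self, other):
#           return isinstance(other, StringValue) and self.s == other.s
#
#   targets = [StringValue('red'), StringValue('blue')]
#   check_denotation(targets, [StringValue('blue'), StringValue('red')])   # True
#   check_denotation(targets, [StringValue('red')])                        # False (size mismatch)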
|
ContextualSP/lemon/executor/strongsup/value.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/value.py",
"repo_id": "ContextualSP",
"token_count": 556
}
| 241 |
# AI2 Reasoning Challenge
* [evaluator](evaluator/) is the program used by the AI2 Leaderboard to evaluate submitted predictions.
* [data-easy](data-easy/) and [data-challenge](data-challenge/) have the files (and scripts to generate them) used for evaluating Leaderboard predictions.
## Example usage
To evaluate dummy predictions (every question is predicted to be `A`) against the easy dataset, run this:
```
% python3 evaluator/evaluator.py -qa data-easy/question-answers.jsonl -p data-easy/dummy-predictions.csv -o metrics.json
% cat metrics.json
{"accuracy": 0.2398989898989899}
```
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/README.md",
"repo_id": "ContextualSP",
"token_count": 180
}
| 242 |
#!/bin/bash
set -xe
docker build -t aristo-leaderboard-eval-test .
T=$(mktemp -d /tmp/tmp-XXXXX)
docker run \
-v $T:/output:rw \
-v $PWD:/input:ro \
aristo-leaderboard-eval-test \
./evaluator.py \
--question-answers /input/questions.jsonl \
--predictions /input/predictions.csv \
--output /output/metrics.json
if [ "$(cat $T/metrics.json)" != '{"accuracy": 0.85}' ]; then
echo File $T/metrics.json looks wrong.
exit 1
fi
echo File $T/metrics.json looks okay.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/test.sh/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/test.sh",
"repo_id": "ContextualSP",
"token_count": 200
}
| 243 |
#!/bin/bash
set -e
echo ------------------------
echo Building evaluator image
echo ------------------------
echo
set -x
docker build -t eqasc-evaluator .
set +x
echo
echo ------------------------
echo Running evaluator on known predictions and labels
echo ------------------------
echo
tempdir=$(mktemp -d /tmp/temp.XXXX)
set -x
docker run \
-v $PWD/predictions:/predictions:ro \
-v $PWD/../data:/labels:ro \
-v $tempdir:/output:rw \
--entrypoint python \
eqasc-evaluator \
allennlp_reasoning_explainqa/evaluator/evaluator.py \
/predictions/grc.test.predict \
/labels/chainid_to_label_test.json \
/output/metrics.json
set +x
echo
echo
echo ------------------------
echo Comparing metrics.json to expected scores
echo ------------------------
echo
echo -n '{"auc_roc": 0.8457533894216488, "explainP1": 0.5387978142076503, "explainNDCG": 0.6376201537170901}' > $tempdir/metrics.json-expected
echo "Expected metrics:"
echo
cat $tempdir/metrics.json-expected
echo
echo
echo "Actual metrics:"
echo
cat $tempdir/metrics.json
echo
echo
echo Diff:
echo
diff -u $tempdir/metrics.json $tempdir/metrics.json-expected
echo "👍 No difference detected. The calculated metrics match the expected ones!"
echo
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/test-with-docker.sh/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/test-with-docker.sh",
"repo_id": "ContextualSP",
"token_count": 417
}
| 244 |
# Locations
NO_LOCATION = 'null' # This location is used for a participant that doesn't exist (was destroyed, or not yet created)
LOCATION_UNKNOWN = 'unk'
# Actions
NO_ACTION = 'NONE'
MOVE = 'MOVE'
CREATE = 'CREATE'
DESTROY = 'DESTROY'
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/constants.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/constants.py",
"repo_id": "ContextualSP",
"token_count": 83
}
| 245 |
## Test case: Prediction and answer are same
* answers.tsv is the answer to process 1167 from the training set.
* predictions.tsv is a copy of the answer to process 1167.
An evaluation on this prediction should result in an F1 score of 1.0.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-2/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-2/README.md",
"repo_id": "ContextualSP",
"token_count": 65
}
| 246 |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
VALID_PREDICTION_VALUES = ['E', 'N']
def calculate_accuracy(answers: Dict[str, str], predictions: Dict[str, str]) -> float:
score = 0.0
for entailment_pair_id, answer in answers.items():
try:
predictions_for_q = predictions[entailment_pair_id]
except KeyError:
logging.error("Missing prediction for entailment pair '%s'.", entailment_pair_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1
del predictions[entailment_pair_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(answers)
def read_answers(filename: str) -> Dict[str, str]:
answers = {} # type: Dict[str, str]
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
entailment_pair_id = record["id"]
answer = record["gold_label"]
if entailment_pair_id in answers:
logging.error("Key %s repeated in %s", entailment_pair_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[entailment_pair_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, str]:
predictions = {} # type: Dict[str, str]
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
entailment_pair_id = row[0]
prediction = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if entailment_pair_id in predictions:
logging.error("Key %s repeated in file %s on line %d", entailment_pair_id, filename,
reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if entailment_pair_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
# prediction cannot be empty string
if prediction == "":
logging.error("Key %s has empty string for prediction in file %s on line %d",
entailment_pair_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
# predictions must be part of the controlled vocabulary
if prediction not in VALID_PREDICTION_VALUES:
logging.error("Key %s has invalid prediction in file %s on line %d",
entailment_pair_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[entailment_pair_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for SciTail sentence pairs.')
parser.add_argument(
'--answers', '-a',
help='Filename of the answers to read. Expects a JSONL file with documents that have fields "id" and '
'"gold_label".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.')
args = parser.parse_args()
answers = read_answers(args.answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(answers, predictions)
if args.output:
print("Writing results to file: %s" % args.output)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
else:
print("accuracy:", accuracy)
if __name__ == '__main__':
main()
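# Expected input formats (illustrative examples, not real data):
#   answers (JSONL), one record per line:
#     {"id": "pair-1", "gold_label": "E"}
#   predictions (CSV), one "id,label" row per line, with labels restricted to E or N:
#     pair-1,E
# Running `python evaluator.py -a answers.jsonl -p predictions.csv -o metrics.json`
# writes {"accuracy": ...} to metrics.json.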
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/evaluator/evaluator.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/evaluator/evaluator.py",
"repo_id": "ContextualSP",
"token_count": 2357
}
| 247 |
import os, sys
import json
import numpy as np
import re
import inflect
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from tqdm import tqdm
sys.path.append('../')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--start_index', help='Path to load verifier model')
parser.add_argument('--num_examples', help='local rank')
parser.add_argument('--local_rank')
args = parser.parse_args()
inflect = inflect.engine()
def check_contain_upper(password):
pattern = re.compile('[A-Z]+')
match = pattern.findall(password)
if match:
return True
else:
return False
def write(d, f):
json.dump(d, f)
f.write('\n')
class SearchQuery():
# @classmethod
# def claim2text(cls, claim, type='text'):
# search_body = {
# "query": {
# "match": {
# type: claim
# }
# }
# }
# return search_body
@classmethod
def claim2text(cls, claim):
# score in both text and title
search_body = {
"query": {
"multi_match": {
"query": claim,
"fields": ['text'],
"fuzziness": "AUTO"
}
}
}
return search_body
@classmethod
def kws2title(cls, multi_claim):
search_body = {
"query": {
"bool": {
"should": [
]
}
}}
for claim in multi_claim:
tiny_body = {
"match_phrase": {
"title": {
'query': claim,
"slop": 2
}
# "slop": 5
}
}
search_body['query']['bool']['should'].append(tiny_body)
return search_body
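# Illustrative output of the query builders above (query strings are hypothetical):
#   SearchQuery.claim2text("capital of france") returns
#     {"query": {"multi_match": {"query": "capital of france",
#                                "fields": ["text"], "fuzziness": "AUTO"}}}
#   SearchQuery.kws2title(["Paris", "France"]) returns a bool/should query with one
#   match_phrase clause (slop=2) on the "title" field per keyword.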
class MyElastic():
def __init__(self, index_name='wiki_search'):
self.es = Elasticsearch([{'host': '127.0.0.1', 'port': 9200}])
self.index_name = index_name
body = {
"properties": {
"id": {
"type": "keywords",
# "analyzer":"not_analyzed"
}
}
}
if not self.es.indices.exists(index=self.index_name,request_timeout=60):
self.es.indices.create(self.index_name,request_timeout=60)
self.es.indices.put_mapping(index=self.index_name, doc_type='wiki_title',
body=body, include_type_name=True)
# self.es.indices.put_mapping(index=self.index_name, doc_type='wiki_sentence',
# body=body, include_type_name=True)
def search(self, search_body):
ret = self.es.search(index=self.index_name, body=search_body, size=10)
return ret
def search_by_text(self,query):
search_body = SearchQuery.claim2text(query)
ret = self.search(search_body)
return ret
if __name__ == '__main__':
ES = MyElastic()
start_index = int(args.start_index)
num_examples = int(args.num_examples)
basic_dir = './LogiGAN'
with open(f"{basic_dir}/data/gan_corpus_new/beta/gen_train_B.jsonl", "r") as fr:
with open(f"{basic_dir}/data/gan_corpus_new/es/gen_train_src_{start_index}_{start_index + num_examples}.jsonl", "w") as fw:
cnt = 0
for l in fr.readlines():
if cnt < start_index:
cnt += 1
continue
print(f"From local rank {args.local_rank}: {num_examples} left.")
if num_examples == 0: break
dic = json.loads(l)
in_ = dic["input"]
out_ = dic["output"]
res = ES.search_by_text(out_)["hits"]["hits"]
profounds = [r["_source"]["text"] for r in res[1:]]
for p in profounds:
d = {"input": in_, "conclusion": p, "is_gold": 0}
write(d, fw)
d = {"input": in_, "conclusion": out_, "is_gold": 1}
write(d, fw)
num_examples -= 1
|
ContextualSP/logigan/corpus_construction/elastic_search/build_gen_train.py/0
|
{
"file_path": "ContextualSP/logigan/corpus_construction/elastic_search/build_gen_train.py",
"repo_id": "ContextualSP",
"token_count": 2299
}
| 248 |
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from transformers.file_utils import PaddingStrategy
import copy
from dataclasses import dataclass
InputDataClass = NewType("InputDataClass", Any)
@dataclass
class DataCollatorForGAN:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
max_instance_num: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
import numpy as np
if return_tensors is None:
return_tensors = self.return_tensors
new_features = []
batch_size = self.max_instance_num
for ins_feature in features:
if len(new_features)>=batch_size:
break
tmp_features = []
gold_idx = len(ins_feature)
for i in range(len(ins_feature['labels'])):
tmp_features.append(copy.deepcopy(ins_feature))
tmp_features[-1]['labels'] = ins_feature['labels'][i]
if 'is_gold' in ins_feature.keys():
tmp_features[-1]['is_gold'] = ins_feature['is_gold'][i]
gold_idx = ins_feature['is_gold'].index(1)
if 'ver_prob' in ins_feature.keys():
tmp_features[-1]['ver_prob'] = ins_feature['ver_prob'][i]
new_features.append(tmp_features[gold_idx])
rest_num = min(max(0,batch_size-len(new_features)),len(tmp_features)-1)
if rest_num!=0:
fake_ids = np.random.choice(np.arange(len(tmp_features)), rest_num+1,
replace=False) if rest_num + 1 < len(tmp_features) else np.arange(len(tmp_features))
selected_negs = [tmp_features[id] for id in fake_ids if id!=gold_idx][:rest_num]
new_features.extend(selected_negs)
# print(new_features)
# print(len(new_features))
# exit()
labels = [feature["labels"] for feature in new_features] if "labels" in new_features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
for feature in new_features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
new_features = self.tokenizer.pad(
new_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=new_features["labels"])
new_features["decoder_input_ids"] = decoder_input_ids
return new_features
|
ContextualSP/logigan/pre-training/gan_dataset.py/0
|
{
"file_path": "ContextualSP/logigan/pre-training/gan_dataset.py",
"repo_id": "ContextualSP",
"token_count": 2776
}
| 249 |
## Poset Decoding <img src="https://pytorch.org/assets/images/logo-dark.svg" height = "25" align=center />
The official pytorch implementation of our paper [Hierarchical Poset Decoding for Compositional Generalization in Language](https://arxiv.org/pdf/2002.00652.pdf).
If you find our code useful, please consider citing our paper:
```
@inproceedings{Yinuo2020Hirearchical,
title={Hierarchical Poset Decoding for Compositional Generalization in Language},
author={Yinuo Guo and Zeqi Lin and Jian-Guang Lou and Dongmei Zhang},
booktitle={Advances in Neural Information Processing Systems},
year={2020}
}
```
## Dependency
pip install -r requirements.txt
## Data preprocess
Get the CFQ data: download the dataset from [link](https://storage.cloud.google.com/cfq_dataset/cfq1.1.tar.gz), then run the preprocessing script:
```bash
bash preprocess.sh
```
## Training
### Sketch Prediction
```bash
cd sketch_prediction/
bash ./train.sh
```
### Traversal Path Prediction
> The module is based on the open-source project Matchzoo-py <https://github.com/NTMC-Community/MatchZoo-py>
```bash
cd ./traversal_path_prediction/MatchZoo-py/
python train_esim.py
```
## Evaluation
```bash
bash evaluate.sh
```
## MCD2 and MCD3
In the aforementioned Training and Evaluation sections, we train and evaluate HPD on the MCD1 split.
To train and evaluate on the MCD2/MCD3 splits, please replace `mcd1` with `mcd2` or `mcd3` in the following files:
- sketch_prediction/train.sh
- sketch_prediction/evaluate.sh
- traversal_path_prediction/MatchZoo-py/train_esim.py
- traversal_path_prediction/MatchZoo-py/evaluate_esim.py
- traversal_path_prediction/MatchZoo-py/datasets/cfq/load_data.py
- evaluate.sh
## Acknowledgement
We thank the following repos, which were very helpful to us.
- [Matchzoo-py](https://github.com/NTMC-Community/MatchZoo-py)
## Contact
For any questions, please contact `zeqi DOT lin AT microsoft DOT com`.
|
ContextualSP/poset_decoding/README.md/0
|
{
"file_path": "ContextualSP/poset_decoding/README.md",
"repo_id": "ContextualSP",
"token_count": 650
}
| 250 |
import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
class Tree:
def __init__(self, value):
# value = [1] tensor, the value is: output_token_idx
# value of tree root should be [word_to_idx('<sos>')]
self.value = value
self.children = dict()
class Trie:
def __init__(self, node = None, sos_token = None):
if node:
self.root = node
elif sos_token:
self.root = Tree(sos_token)
def add_path(self, sparql):
sparql_list = sparql.split()
node = self.root
for v in sparql_list:
# a1, r, a2 = sub_sparql.split()
# for v in sub_sparql.split():
if v not in node.children:
node.children[v] = Tree(v)
node = node.children[v]
def get_label(self, sparql):
# Purpose: derive the per-step target labels from the trie
target_list = []
sparql_list = sparql.split()
node = self.root
for token in sparql_list:
target_list.append(' '.join(list(node.children.keys())))
node = node.children[token]
return target_list
def print_tree(self):
def dfs(node):
if len(node.children) > 0:
print(f"value:{node.value}, children:{node.children.keys()}")
for k, v in node.children.items():
dfs(v)
dfs(self.root)
def get_path(self):
ans = []
path = []
def dfs(node, ans, path):
if len(node.children) > 0:
# print(f"value:{node.value}, children:{node.children.keys()}")
for k, v in node.children.items():
dfs(v, ans, path+[k])
else:
ans.append(path)
path = []
dfs(self.root, ans, path)
return ans
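# Minimal usage sketch (tokens are hypothetical): add_path inserts a
# whitespace-split token sequence, get_label returns the space-joined set of
# admissible next tokens for each decoding step, and get_path enumerates all
# root-to-leaf token sequences.
#
#   trie = Trie(sos_token='<sos>')
#   trie.add_path('a r b')
#   trie.add_path('a r c')
#   trie.get_label('a r b')   # ['a', 'r', 'b c']
#   trie.get_path()           # [['a', 'r', 'b'], ['a', 'r', 'c']]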
def collate_fn(samples):
source_samples, target_samples, label_samples, duplicate_len = [], [], [], []
for sample in samples:
source_samples += sample[0]
target_samples += sample[1]
label_samples += sample[2]
duplicate_len.append(sample[3])
return source_samples, target_samples, label_samples, duplicate_len
|
ContextualSP/poset_decoding/sketch_prediction/utils.py/0
|
{
"file_path": "ContextualSP/poset_decoding/sketch_prediction/utils.py",
"repo_id": "ContextualSP",
"token_count": 799
}
| 251 |
"""Convert list of input into class:`DataPack` expected format."""
import typing
import pandas as pd
import numpy as np
import matchzoo
from matchzoo.engine.base_task import BaseTask
def pack(
df: pd.DataFrame,
task: typing.Union[str, BaseTask] = 'ranking',
) -> 'matchzoo.DataPack':
"""
Pack a :class:`DataPack` using `df`.
The `df` must have `text_left` and `text_right` columns. Optionally,
the `df` can have `id_left`, `id_right` to index `text_left` and
`text_right` respectively. `id_left`, `id_right` will be automatically
generated if not specified.
:param df: Input :class:`pandas.DataFrame` to use.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
Examples::
>>> import matchzoo as mz
>>> import pandas as pd
>>> df = pd.DataFrame(data={'text_left': list('AABC'),
... 'text_right': list('abbc'),
... 'label': [0, 1, 1, 0]})
>>> mz.pack(df, task='classification').frame()
id_left text_left id_right text_right label
0 L-0 A R-0 a 0
1 L-0 A R-1 b 1
2 L-1 B R-1 b 1
3 L-2 C R-2 c 0
>>> mz.pack(df, task='ranking').frame()
id_left text_left id_right text_right label
0 L-0 A R-0 a 0.0
1 L-0 A R-1 b 1.0
2 L-1 B R-1 b 1.0
3 L-2 C R-2 c 0.0
"""
if 'text_left' not in df or 'text_right' not in df:
raise ValueError(
'Input data frame must have `text_left` and `text_right`.')
df = df.dropna(axis=0, how='any').reset_index(drop=True)
# Gather IDs
if 'id_left' not in df:
id_left = _gen_ids(df, 'text_left', 'L-')
else:
id_left = df['id_left']
if 'id_right' not in df:
id_right = _gen_ids(df, 'text_right', 'R-')
else:
id_right = df['id_right']
# Build Relation
relation = pd.DataFrame(data={'id_left': id_left, 'id_right': id_right})
for col in df:
if col not in ['id_left', 'id_right', 'text_left', 'text_right']:
relation[col] = df[col]
if 'label' in relation:
if task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
relation['label'] = relation['label'].astype(int)
elif task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
relation['label'] = relation['label'].astype(float)
else:
raise ValueError(f"{task} is not a valid task.")
# Build Left and Right
left = _merge(df, id_left, 'text_left', 'id_left')
right = _merge(df, id_right, 'text_right', 'id_right')
return matchzoo.DataPack(relation, left, right)
def _merge(data: pd.DataFrame, ids: typing.Union[list, np.array],
text_label: str, id_label: str):
left = pd.DataFrame(data={
text_label: data[text_label], id_label: ids
})
left.drop_duplicates(id_label, inplace=True)
left.set_index(id_label, inplace=True)
return left
def _gen_ids(data: pd.DataFrame, col: str, prefix: str):
lookup = {}
for text in data[col].unique():
lookup[text] = prefix + str(len(lookup))
return data[col].map(lookup)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/data_pack/pack.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/data_pack/pack.py",
"repo_id": "ContextualSP",
"token_count": 1734
}
| 252 |
"""WikiQA data loader."""
import typing
import csv
from pathlib import Path
import pandas as pd
import matchzoo
from matchzoo.engine.base_task import BaseTask
_url = "https://download.microsoft.com/download/E/5/F/" \
"E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"
def load_data(
stage: str = 'train',
task: typing.Union[str, BaseTask] = 'ranking',
filtered: bool = False,
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load WikiQA data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param filtered: Whether remove the questions without correct answers.
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'WikiQA-{stage}.tsv')
data_pack = _read_data(file_path, task)
if filtered and stage in ('dev', 'test'):
ref_path = data_root.joinpath(f'WikiQA-{stage}.ref')
filter_ref_path = data_root.joinpath(f'WikiQA-{stage}-filtered.ref')
with open(filter_ref_path, mode='r') as f:
filtered_ids = set([line.split()[0] for line in f])
filtered_lines = []
with open(ref_path, mode='r') as f:
for idx, line in enumerate(f.readlines()):
if line.split()[0] in filtered_ids:
filtered_lines.append(idx)
data_pack = data_pack[filtered_lines]
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = matchzoo.utils.get_file(
'wikiqa', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='wiki_qa'
)
return Path(ref_path).parent.joinpath('WikiQACorpus')
def _read_data(path, task):
table = pd.read_csv(path, sep='\t', header=0, quoting=csv.QUOTE_NONE)
df = pd.DataFrame({
'text_left': table['Question'],
'text_right': table['Sentence'],
'id_left': table['QuestionID'],
'id_right': table['SentenceID'],
'label': table['Label']
})
return matchzoo.pack(df, task)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/wiki_qa/load_data.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/wiki_qa/load_data.py",
"repo_id": "ContextualSP",
"token_count": 1284
}
| 253 |
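A hedged usage sketch for `load_data` above (the first call downloads the WikiQA corpus, so network access is assumed):

import matchzoo as mz

train_pack = mz.datasets.wiki_qa.load_data(stage='train', task='ranking')
dev_pack, classes = mz.datasets.wiki_qa.load_data(
    stage='dev', task='classification', filtered=True, return_classes=True)
print(len(train_pack), classes)  # classes is [False, True]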
"""Accuracy metric for Classification."""
import numpy as np
from matchzoo.engine.base_metric import ClassificationMetric
class Accuracy(ClassificationMetric):
"""Accuracy metric."""
ALIAS = ['accuracy', 'acc']
def __init__(self):
""":class:`Accuracy` constructor."""
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS[0]}"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate accuracy.
Example:
>>> import numpy as np
>>> y_true = np.array([1])
>>> y_pred = np.array([[0, 1]])
>>> Accuracy()(y_true, y_pred)
1.0
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Accuracy.
"""
y_pred = np.argmax(y_pred, axis=1)
return np.sum(y_pred == y_true) / float(y_true.size)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/accuracy.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/accuracy.py",
"repo_id": "ContextualSP",
"token_count": 440
}
| 254 |
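A small sketch extending the docstring example above to a batch of predictions (the import path is assumed from the file location):

import numpy as np
from matchzoo.metrics import Accuracy

y_true = np.array([1, 0, 1, 1])
y_pred = np.array([[0.1, 0.9],   # argmax -> 1, correct
                   [0.8, 0.2],   # argmax -> 0, correct
                   [0.7, 0.3],   # argmax -> 0, wrong
                   [0.4, 0.6]])  # argmax -> 1, correct
print(Accuracy()(y_true, y_pred))  # 0.75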
"""An implementation of ConvKNRM Model."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
from matchzoo.modules import GaussianKernel
from matchzoo.utils import parse_activation
class ConvKNRM(BaseModel):
"""
ConvKNRM Model.
Examples:
>>> model = ConvKNRM()
>>> model.params['filters'] = 128
>>> model.params['conv_activation_func'] = 'tanh'
>>> model.params['max_ngram'] = 3
>>> model.params['use_crossmatch'] = True
>>> model.params['kernel_num'] = 11
>>> model.params['sigma'] = 0.1
>>> model.params['exact_sigma'] = 0.001
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(
name='filters',
value=128,
desc="The filter size in the convolution layer."
))
params.add(Param(
name='conv_activation_func',
value='relu',
desc="The activation function in the convolution layer."))
params.add(Param(
name='max_ngram',
value=3,
desc="The maximum length of n-grams for the convolution "
"layer."))
params.add(Param(
name='use_crossmatch',
value=True,
desc="Whether to match left n-grams and right n-grams of "
"different lengths"))
params.add(Param(
name='kernel_num',
value=11,
hyper_space=hyper_spaces.quniform(low=5, high=20),
desc="The number of RBF kernels."
))
params.add(Param(
name='sigma',
value=0.1,
hyper_space=hyper_spaces.quniform(
low=0.01, high=0.2, q=0.01),
desc="The `sigma` defines the kernel width."
))
params.add(Param(
name='exact_sigma', value=0.001,
desc="The `exact_sigma` denotes the `sigma` "
"for exact match."
))
return params
def build(self):
"""Build model structure."""
self.embedding = self._make_default_embedding_layer()
self.q_convs = nn.ModuleList()
self.d_convs = nn.ModuleList()
for i in range(self._params['max_ngram']):
conv = nn.Sequential(
nn.ConstantPad1d((0, i), 0),
nn.Conv1d(
in_channels=self._params['embedding_output_dim'],
out_channels=self._params['filters'],
kernel_size=i + 1
),
parse_activation(
self._params['conv_activation_func']
)
)
self.q_convs.append(conv)
self.d_convs.append(conv)
self.kernels = nn.ModuleList()
for i in range(self._params['kernel_num']):
mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
self._params['kernel_num'] - 1) - 1.0
sigma = self._params['sigma']
if mu > 1.0:
sigma = self._params['exact_sigma']
mu = 1.0
self.kernels.append(GaussianKernel(mu=mu, sigma=sigma))
dim = self._params['max_ngram'] ** 2 * self._params['kernel_num']
self.out = self._make_output_layer(dim)
def forward(self, inputs):
"""Forward."""
query, doc = inputs['text_left'], inputs['text_right']
q_embed = self.embedding(query.long()).transpose(1, 2)
d_embed = self.embedding(doc.long()).transpose(1, 2)
q_convs = []
d_convs = []
for q_conv, d_conv in zip(self.q_convs, self.d_convs):
q_convs.append(q_conv(q_embed).transpose(1, 2))
d_convs.append(d_conv(d_embed).transpose(1, 2))
KM = []
for qi in range(self._params['max_ngram']):
for di in range(self._params['max_ngram']):
                # skip n-grams of different lengths unless crossmatch is enabled
if not self._params['use_crossmatch'] and qi != di:
continue
mm = torch.einsum(
'bld,brd->blr',
F.normalize(q_convs[qi], p=2, dim=-1),
F.normalize(d_convs[di], p=2, dim=-1)
)
for kernel in self.kernels:
K = torch.log1p(kernel(mm).sum(dim=-1)).sum(dim=-1)
KM.append(K)
phi = torch.stack(KM, dim=1)
out = self.out(phi)
return out
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/conv_knrm.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/conv_knrm.py",
"repo_id": "ContextualSP",
"token_count": 2516
}
| 255 |
from .attention import Attention
from .attention import BidirectionalAttention
from .attention import MatchModule
from .dropout import RNNDropout
from .stacked_brnn import StackedBRNN
from .gaussian_kernel import GaussianKernel
from .matching import Matching
from .bert_module import BertModule
from .character_embedding import CharacterEmbedding
from .semantic_composite import SemanticComposite
from .dense_net import DenseNet
from .matching_tensor import MatchingTensor
from .spatial_gru import SpatialGRU
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/__init__.py",
"repo_id": "ContextualSP",
"token_count": 140
}
| 256 |
"""Build unit from data pack."""
from tqdm import tqdm
import matchzoo as mz
from .units import StatefulUnit
def build_unit_from_data_pack(
unit: StatefulUnit,
data_pack: mz.DataPack, mode: str = 'both',
flatten: bool = True, verbose: int = 1
) -> StatefulUnit:
"""
Build a :class:`StatefulUnit` from a :class:`DataPack` object.
:param unit: :class:`StatefulUnit` object to be built.
:param data_pack: The input :class:`DataPack` object.
:param mode: One of 'left', 'right', and 'both', to determine the source
data for building the :class:`VocabularyUnit`.
:param flatten: Flatten the datapack or not. `True` to organize the
:class:`DataPack` text as a list, and `False` to organize
:class:`DataPack` text as a list of list.
:param verbose: Verbosity.
:return: A built :class:`StatefulUnit` object.
"""
corpus = []
if flatten:
data_pack.apply_on_text(corpus.extend, mode=mode, verbose=verbose)
else:
data_pack.apply_on_text(corpus.append, mode=mode, verbose=verbose)
if verbose:
description = 'Building ' + unit.__class__.__name__ + \
' from a datapack.'
corpus = tqdm(corpus, desc=description)
unit.fit(corpus)
return unit
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/build_unit_from_data_pack.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/build_unit_from_data_pack.py",
"repo_id": "ContextualSP",
"token_count": 525
}
| 257 |
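A hedged sketch of fitting a vocabulary with the helper above (assumes the `Vocabulary` stateful unit shipped with MatchZoo-py; the toy data is illustrative):

import pandas as pd
import matchzoo as mz
from matchzoo.preprocessors.build_unit_from_data_pack import build_unit_from_data_pack

df = pd.DataFrame({'text_left': ['hello world', 'hello there'],
                   'text_right': ['world peace', 'there it is'],
                   'label': [1.0, 0.0]})
pack = mz.pack(df, task='ranking')
pack.apply_on_text(str.split, inplace=True)  # tokenize so the unit sees token lists

vocab_unit = mz.preprocessors.units.Vocabulary()
vocab_unit = build_unit_from_data_pack(vocab_unit, pack, mode='both', flatten=True, verbose=0)
print(vocab_unit.state['term_index'])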
import nltk
from .unit import Unit
class Tokenize(Unit):
"""Process unit for text tokenization."""
def transform(self, input_: str) -> list:
"""
Process input data from raw terms to list of tokens.
:param input_: raw textual input.
:return tokens: tokenized tokens as a list.
"""
# return nltk.word_tokenize(input_)
return input_.split()
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/tokenize.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/tokenize.py",
"repo_id": "ContextualSP",
"token_count": 158
}
| 258 |
"""One hot vectors."""
import numpy as np
def one_hot(indices: int, num_classes: int) -> np.ndarray:
""":return: A one-hot encoded vector."""
vec = np.zeros((num_classes,), dtype=np.int64)
vec[indices] = 1
return vec
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/one_hot.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/one_hot.py",
"repo_id": "ContextualSP",
"token_count": 94
}
| 259 |
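For instance, `one_hot(2, 5)` above yields a length-5 vector with a single 1 (the import path is assumed from the file location):

from matchzoo.utils.one_hot import one_hot

print(one_hot(2, num_classes=5))  # [0 0 1 0 0]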
import pytest
from matchzoo.engine.base_model import BaseModel
def test_base_model_abstract_instantiation():
with pytest.raises(TypeError):
model = BaseModel(BaseModel.get_default_params())
assert model
def test_base_model_concrete_instantiation():
class MyBaseModel(BaseModel):
def build(self):
self.a, self.b = 1, 2
def forward(self):
return self.a + self.b
model = MyBaseModel()
assert model.params
model.guess_and_fill_missing_params()
model.build()
assert model.params.completed(exclude=['out_activation_func'])
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/models/test_base_model.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/models/test_base_model.py",
"repo_id": "ContextualSP",
"token_count": 244
}
| 260 |
<jupyter_start><jupyter_code>%run init.ipynb
preprocessor = mz.models.ArcII.get_default_preprocessor(
filter_mode='df',
filter_low_freq=2,
)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.ArcII.get_default_padding_callback(
fixed_length_left=10,
fixed_length_right=100,
pad_word_value=0,
pad_word_mode='pre'
)
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.ArcII()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['left_length'] = 10
model.params['right_length'] = 100
model.params['kernel_1d_count'] = 32
model.params['kernel_1d_size'] = 3
model.params['kernel_2d_count'] = [64, 64]
model.params['kernel_2d_size'] = [(3, 3), (3, 3)]
model.params['pool_2d_size'] = [(3, 3), (3, 3)]
model.params['dropout_rate'] = 0.3
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/arcii.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/arcii.ipynb",
"repo_id": "ContextualSP",
"token_count": 859
}
| 261 |
set seed=1
set config_file=train_configs_bert/concat.none.jsonnet
set model_file=checkpoints_cosql/cosql_bert_concat_none_model
set tables_file=dataset_cosql/tables.json
set database_path=dataset_cosql/database
set dataset_path=dataset_cosql
set train_data_path=dataset_cosql/train.json
set validation_data_path=dataset_cosql/dev.json
allennlp train -s %model_file% %config_file% ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
-o {"""model.serialization_dir""":"""%model_file%""","""random_seed""":"""%seed%""","""numpy_seed""":"""%seed%""","""pytorch_seed""":"""%seed%""","""dataset_reader.tables_file""":"""%tables_file%""","""dataset_reader.database_path""":"""%database_path%""","""train_data_path""":"""%train_data_path%""","""validation_data_path""":"""%validation_data_path%""","""model.dataset_path""":"""%dataset_path%"""}
|
ContextualSP/semantic_parsing_in_context/bash_files/windows/train_cosql_bert.bat/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/windows/train_cosql_bert.bat",
"repo_id": "ContextualSP",
"token_count": 332
}
| 262 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import argparse
def convert_dataset(valid_file, valid_out_file):
"""
The package `allennlp` requires the validation file as the format of each line containing a json object.
:param valid_file: valid file input, the original dataset validation file.
:param valid_out_file: valid file output, the adapted file for allennlp package.
"""
write_file = open(valid_out_file, "w", encoding="utf8")
with open(valid_file, "r", encoding="utf8") as f:
content = json.load(f)
for instance in content:
write_file.write(json.dumps(instance) + "\n")
write_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--valid_file', type=str)
parser.add_argument('--valid_out_file', type=str)
args = parser.parse_args()
convert_dataset(args.valid_file, args.valid_out_file)
|
ContextualSP/semantic_parsing_in_context/postprocess.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/postprocess.py",
"repo_id": "ContextualSP",
"token_count": 340
}
| 263 |
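The converter above can also be called directly from Python rather than via the CLI (the file names below are illustrative):

from postprocess import convert_dataset  # assumes the script directory is on the Python path

convert_dataset(valid_file='dataset/dev.json', valid_out_file='dataset/dev_converted.json')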
import re
from collections import Counter, defaultdict
from typing import Dict, Tuple, List
from unidecode import unidecode
from semparse.sql.spider_utils import TableColumn, read_dataset_schema, read_dataset_values
# == stop words that will be omitted by ContextGenerator
STOP_WORDS = {"", "", "all", "being", "-", "over", "through", "yourselves", "its", "before",
"hadn", "with", "had", ",", "should", "to", "only", "under", "ours", "has", "ought", "do",
"them", "his", "than", "very", "cannot", "they", "not", "during", "yourself", "him",
"nor", "did", "didn", "'ve", "this", "she", "each", "where", "because", "doing", "some", "we", "are",
"further", "ourselves", "out", "what", "for", "weren", "does", "above", "between", "mustn", "?",
"be", "hasn", "who", "were", "here", "shouldn", "let", "hers", "by", "both", "about", "couldn",
"of", "could", "against", "isn", "or", "own", "into", "while", "whom", "down", "wasn", "your",
"from", "her", "their", "aren", "there", "been", ".", "few", "too", "wouldn", "themselves",
":", "was", "until", "more", "himself", "on", "but", "don", "herself", "haven", "those", "he",
"me", "myself", "these", "up", ";", "below", "'re", "can", "theirs", "my", "and", "would", "then",
"is", "am", "it", "doesn", "an", "as", "itself", "at", "have", "in", "any", "if", "!",
"again", "'ll", "no", "that", "when", "same", "how", "other", "which", "you", "many", "shan",
"'t", "'s", "our", "after", "most", "'d", "such", "'m", "why", "a", "off", "i", "yours", "so",
"the", "having", "once"}
digits_list = [str(i) for i in range(10)]
class SpiderDBContext:
schemas = {}
db_knowledge_graph = {}
db_tables_data = {}
def __init__(self, db_id: str, utterance: str, tables_file: str, dataset_path: str, stanza_model=None, schemas=None,
original_utterance=None):
self.dataset_path = dataset_path
self.tables_file = tables_file
self.db_id = db_id
self.utterance = utterance
self.tokenized_utterance = utterance
self.stanza_model = stanza_model
self.original_utterance = original_utterance if original_utterance is not None else utterance
if schemas is not None:
SpiderDBContext.schemas = schemas
elif db_id not in SpiderDBContext.schemas:
SpiderDBContext.schemas = read_dataset_schema(self.tables_file, self.stanza_model)
self.schema = SpiderDBContext.schemas[db_id]
@staticmethod
def entity_key_for_column(table_name: str, column: TableColumn) -> str:
return f"{table_name.lower()}@{column.name.lower()}"
if column.foreign_key is not None:
column_type = "foreign"
elif column.is_primary_key:
column_type = "primary"
else:
column_type = column.column_type
return f"column:{column_type.lower()}:{table_name.lower()}:{column.name.lower()}"
def get_db_knowledge_graph(self, db_id: str):
db_schema = self.schema
tables = db_schema.values()
if db_id not in self.db_tables_data:
self.db_tables_data[db_id] = read_dataset_values(db_id, self.dataset_path, tables)
tables_data = self.db_tables_data[db_id]
string_column_mapping: Dict[str, set] = defaultdict(set)
for table, table_data in tables_data.items():
for table_row in table_data:
for column, cell_value in zip(db_schema[table.name].columns, table_row):
if column.column_type == 'text' and type(cell_value) is str:
cell_value_normalized = self.normalize_string(cell_value)
column_key = self.entity_key_for_column(table.name, column)
string_column_mapping[cell_value_normalized].add(column_key)
# for key in string_column_mapping:
# string_column_mapping[key]=list(string_column_mapping[key])
string_entities = self.get_entities_from_question(string_column_mapping)
value_match=[]
value_alignment = []
for item in string_entities:
value_match+=item['token_in_columns']
value_alignment.append(item['alignment'])
value_match = list(set(value_match))
r_schemas={}
for table in db_schema:
r_schemas["{0}".format(db_schema[table].name).lower()] = db_schema[table].lemma.lower()
for column in db_schema[table].columns:
r_schemas[f"{db_schema[table].name}@{column.name}".lower()] = column.lemma.strip('is ').lower()
question_tokens = [t for t in self.tokenized_utterance]
schema_counter = Counter()
partial_match = []
exact_match = []
for r_k, r_s in r_schemas.items():
schema_counter[r_s] = 0
#exact match
if r_s in self.tokenized_utterance and r_s not in STOP_WORDS:
schema_counter[r_s] += 2
#partial_match
else:
for tok in r_s.split(' '):
if tok in question_tokens and tok not in STOP_WORDS:
schema_counter[r_s]+=1
continue
for ques_tok in question_tokens:
if tok in STOP_WORDS or ques_tok in STOP_WORDS or \
len(tok)<=3 or len(ques_tok)<=3:
continue
if ques_tok in tok or tok in ques_tok:
schema_counter[r_s] += 1
if schema_counter[r_s]>=2:
exact_match.append(r_k)
elif schema_counter[r_s]==1:
partial_match.append(r_k)
return value_match, value_alignment, exact_match, partial_match
def _string_in_table(self, candidate: str,
string_column_mapping: Dict[str, set]) -> List[str]:
"""
Checks if the string occurs in the table, and if it does, returns the names of the columns
under which it occurs. If it does not, returns an empty list.
"""
candidate_column_names: List[str] = []
alignment = []
# First check if the entire candidate occurs as a cell.
candidate= candidate.strip('-_"\'')
if candidate in string_column_mapping and candidate not in digits_list:
candidate_column_names = string_column_mapping[candidate]
alignment.append((candidate,candidate))
        # # If not, check if it is a substring of any cell value.
# if not candidate_column_names:
# for cell_value, column_names in string_column_mapping.items():
# if candidate in re.split(' |_|:',
# cell_value) and candidate not in STOP_WORDS and candidate not in digits_list:
# candidate_column_names.extend(column_names)
# alignment.append((candidate, cell_value))
candidate_column_names = list(set(candidate_column_names))
return candidate_column_names, alignment
def get_entities_from_question(self, string_column_mapping: Dict[str, set]) -> List[Tuple[str, str]]:
entity_data = []
for cell_value, column_names in string_column_mapping.items():
if (cell_value.replace('_', ' ') in ' '.join(self.utterance) or cell_value.replace('_',
' ') in self.original_utterance) \
and len(re.split('_', cell_value)) >= 2:
entity_data.append({'value': cell_value,
'token_start': 0,
'token_end': 0,
'alignment': [(cell_value, cell_value)],
'token_in_columns': list(set(column_names))})
for i, token in enumerate(self.tokenized_utterance):
token_text = token
if token_text in STOP_WORDS:
continue
normalized_token_text = token_text
# normalized_token_text = self.normalize_string(token_text)
if not normalized_token_text:
continue
token_columns, alignment = self._string_in_table(normalized_token_text, string_column_mapping)
if token_columns:
entity_data.append({'value': normalized_token_text,
'token_start': i,
'token_end': i+1,
'alignment': alignment,
'token_in_columns': token_columns})
return entity_data
@staticmethod
def normalize_string(string: str) -> str:
"""
These are the transformation rules used to normalize cell in column names in Sempre. See
``edu.stanford.nlp.sempre.tables.StringNormalizationUtils.characterNormalize`` and
``edu.stanford.nlp.sempre.tables.TableTypeSystem.canonicalizeName``. We reproduce those
rules here to normalize and canonicalize cells and columns in the same way so that we can
match them against constants in logical forms appropriately.
"""
# Normalization rules from Sempre
# \u201A -> ,
string = re.sub("‚", ",", string)
string = re.sub("„", ",,", string)
string = re.sub("[·・]", "../sql", string)
string = re.sub("…", "...", string)
string = re.sub("ˆ", "^", string)
string = re.sub("˜", "~", string)
string = re.sub("‹", "<", string)
string = re.sub("›", ">", string)
string = re.sub("[‘’´`]", "'", string)
string = re.sub("[“”«»]", "\"", string)
string = re.sub("[•†‡²³]", "", string)
string = re.sub("[‐‑–—−]", "-", string)
# Oddly, some unicode characters get converted to _ instead of being stripped. Not really
# sure how sempre decides what to do with these... TODO(mattg): can we just get rid of the
# need for this function somehow? It's causing a whole lot of headaches.
string = re.sub("[ðø′″€⁄ªΣ]", "_", string)
# This is such a mess. There isn't just a block of unicode that we can strip out, because
# sometimes sempre just strips diacritics... We'll try stripping out a few separate
# blocks, skipping the ones that sempre skips...
string = re.sub("[\\u0180-\\u0210]", "", string).strip()
string = re.sub("[\\u0220-\\uFFFF]", "", string).strip()
string = string.replace("\\n", "_")
string = re.sub("\\s+", " ", string)
# Canonicalization rules from Sempre.
string = re.sub("[^\\w]", "_", string)
string = re.sub("_+", "_", string)
string = re.sub("_$", "", string)
return unidecode(string.lower())
def _expand_entities(self, question, entity_data, string_column_mapping: Dict[str, set]):
new_entities = []
for entity in entity_data:
# to ensure the same strings are not used over and over
if new_entities and entity['token_end'] <= new_entities[-1]['token_end']:
continue
current_start = entity['token_start']
current_end = entity['token_end']
current_token = entity['value']
current_token_type = entity['token_type']
current_token_columns = entity['token_in_columns']
while current_end < len(question):
next_token = question[current_end].text
next_token_normalized = self.normalize_string(next_token)
if next_token_normalized == "":
current_end += 1
continue
candidate = "%s_%s" %(current_token, next_token_normalized)
                candidate_columns, _ = self._string_in_table(candidate, string_column_mapping)
candidate_columns = list(set(candidate_columns).intersection(current_token_columns))
if not candidate_columns:
break
candidate_type = candidate_columns[0].split(":")[1]
if candidate_type != current_token_type:
break
current_end += 1
current_token = candidate
current_token_columns = candidate_columns
new_entities.append({'token_start': current_start,
'token_end': current_end,
'value': current_token,
'token_type': current_token_type,
'token_in_columns': current_token_columns})
return new_entities
|
ContextualSP/unified_parser_text_to_sql/semparse/contexts/spider_db_context.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/semparse/contexts/spider_db_context.py",
"repo_id": "ContextualSP",
"token_count": 6196
}
| 264 |
import os
import sys
import json
import sqlite3
from os import listdir, makedirs
from os.path import isfile, isdir, join, split, exists, splitext
from nltk import word_tokenize, tokenize
import traceback
EXIST = {"atis", "geo", "advising", "yelp", "restaurants", "imdb", "academic"}
def convert_fk_index(data):
fk_holder = []
for fk in data["foreign_keys"]:
tn, col, ref_tn, ref_col = fk[0][0], fk[0][1], fk[1][0], fk[1][1]
ref_cid, cid = None, None
try:
tid = data["table_names_original"].index(tn)
ref_tid = data["table_names_original"].index(ref_tn)
for i, (tab_id, col_org) in enumerate(data["column_names_original"]):
if tab_id == ref_tid and ref_col == col_org:
ref_cid = i
elif tid == tab_id and col == col_org:
cid = i
            if ref_cid is not None and cid is not None:
fk_holder.append([cid, ref_cid])
except:
traceback.print_exc()
print("table_names_original: ", data["table_names_original"])
print("finding tab name: ", tn, ref_tn)
sys.exit()
return fk_holder
def dump_db_json_schema(db, f):
"""read table and column info"""
conn = sqlite3.connect(db)
conn.execute("pragma foreign_keys=ON")
cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
data = {
"db_id": f,
"table_names_original": [],
"table_names": [],
"column_names_original": [(-1, "*")],
"column_names": [(-1, "*")],
"column_types": ["text"],
"primary_keys": [],
"foreign_keys": [],
}
fk_holder = []
for i, item in enumerate(cursor.fetchall()):
table_name = item[0]
data["table_names_original"].append(table_name)
data["table_names"].append(table_name.lower().replace("_", " "))
fks = conn.execute(
"PRAGMA foreign_key_list('{}') ".format(table_name)
).fetchall()
# print("db:{} table:{} fks:{}".format(f,table_name,fks))
fk_holder.extend([[(table_name, fk[3]), (fk[2], fk[4])] for fk in fks])
cur = conn.execute("PRAGMA table_info('{}') ".format(table_name))
for j, col in enumerate(cur.fetchall()):
data["column_names_original"].append((i, col[1]))
data["column_names"].append((i, col[1].lower().replace("_", " ")))
# varchar, '' -> text, int, numeric -> integer,
col_type = col[2].lower()
if (
"char" in col_type
or col_type == ""
or "text" in col_type
or "var" in col_type
):
data["column_types"].append("text")
elif (
"int" in col_type
or "numeric" in col_type
or "decimal" in col_type
or "number" in col_type
or "id" in col_type
or "real" in col_type
or "double" in col_type
or "float" in col_type
):
data["column_types"].append("number")
elif "date" in col_type or "time" in col_type or "year" in col_type:
data["column_types"].append("time")
elif "boolean" in col_type:
data["column_types"].append("boolean")
else:
data["column_types"].append("others")
if col[5] == 1:
data["primary_keys"].append(len(data["column_names"]) - 1)
data["foreign_keys"] = fk_holder
data["foreign_keys"] = convert_fk_index(data)
return data
if __name__ == "__main__":
if len(sys.argv) < 2:
print(
"Usage: python get_tables.py [dir includes many subdirs containing database.sqlite files] [output file name e.g. output.json] [existing tables.json file to be inherited]"
)
sys.exit()
input_dir = sys.argv[1]
output_file = sys.argv[2]
ex_tab_file = sys.argv[3]
all_fs = [
df for df in listdir(input_dir) if exists(join(input_dir, df, df + ".sqlite"))
]
with open(ex_tab_file) as f:
ex_tabs = json.load(f)
# for tab in ex_tabs:
# tab["foreign_keys"] = convert_fk_index(tab)
ex_tabs = {tab["db_id"]: tab for tab in ex_tabs if tab["db_id"] in all_fs}
print("precessed file num: ", len(ex_tabs))
not_fs = [
df
for df in listdir(input_dir)
if not exists(join(input_dir, df, df + ".sqlite"))
]
for d in not_fs:
print("no sqlite file found in: ", d)
db_files = [
(df + ".sqlite", df)
for df in listdir(input_dir)
if exists(join(input_dir, df, df + ".sqlite"))
]
tables = []
for f, df in db_files:
# if df in ex_tabs.keys():
# print 'reading old db: ', df
# tables.append(ex_tabs[df])
db = join(input_dir, df, f)
print("\nreading new db: ", df)
table = dump_db_json_schema(db, df)
        prev_tab_num = len(ex_tabs[df]["table_names"]) if df in ex_tabs else 0
        prev_col_num = len(ex_tabs[df]["column_names"]) if df in ex_tabs else 0
cur_tab_num = len(table["table_names"])
cur_col_num = len(table["column_names"])
if (
df in ex_tabs.keys()
and prev_tab_num == cur_tab_num
and prev_col_num == cur_col_num
and prev_tab_num != 0
and len(ex_tabs[df]["column_names"]) > 1
):
table["table_names"] = ex_tabs[df]["table_names"]
table["column_names"] = ex_tabs[df]["column_names"]
else:
print("\n----------------------------------problem db: ", df)
tables.append(table)
print("final db num: ", len(tables))
with open(output_file, "wt") as out:
json.dump(tables, out, sort_keys=True, indent=2, separators=(",", ": "))
|
ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/get_tables.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/get_tables.py",
"repo_id": "ContextualSP",
"token_count": 2963
}
| 265 |
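A hedged sketch of calling `dump_db_json_schema` above on a single database outside the CLI (the path and db id are illustrative):

import json
from get_tables import dump_db_json_schema  # assumes the script directory is on the Python path

schema = dump_db_json_schema('database/my_db/my_db.sqlite', 'my_db')
print(json.dumps(schema, indent=2)[:300])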
import os
import cv2
import json
import torch
import scipy
import scipy.io as sio
from skimage import io
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
class Flowers(ImageFolder):
def __init__(self, root, train=True, transform=None, **kwargs):
self.dataset_root = root
self.loader = default_loader
self.target_transform = None
self.transform = transform
label_path = os.path.join(root, 'imagelabels.mat')
split_path = os.path.join(root, 'setid.mat')
print('Dataset Flowers is trained with resolution 224!')
# labels
labels = sio.loadmat(label_path)['labels'][0]
self.img_to_label = dict()
for i in range(len(labels)):
self.img_to_label[i] = labels[i]
splits = sio.loadmat(split_path)
self.trnid, self.valid, self.tstid = sorted(splits['trnid'][0].tolist()), \
sorted(splits['valid'][0].tolist()), \
sorted(splits['tstid'][0].tolist())
if train:
self.imgs = self.trnid + self.valid
else:
self.imgs = self.tstid
self.samples = []
for item in self.imgs:
self.samples.append((os.path.join(root, 'jpg', "image_{:05d}.jpg".format(item)), self.img_to_label[item-1]-1))
class Cars196(ImageFolder, datasets.CIFAR10):
base_folder_devkit = 'devkit'
base_folder_trainims = 'cars_train'
base_folder_testims = 'cars_test'
filename_testanno = 'cars_test_annos.mat'
filename_trainanno = 'cars_train_annos.mat'
base_folder = 'cars_train'
train_list = [
['00001.jpg', '8df595812fee3ca9a215e1ad4b0fb0c4'],
['00002.jpg', '4b9e5efcc3612378ec63a22f618b5028']
]
test_list = []
num_training_classes = 98 # 196/2
def __init__(self, root, train=False, transform=None, target_transform=None, **kwargs):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.loader = default_loader
print('Dataset Cars196 is trained with resolution 224!')
self.samples = []
self.nb_classes = 196
if train:
labels = \
sio.loadmat(os.path.join(self.root, self.base_folder_devkit, self.filename_trainanno))['annotations'][0]
for item in labels:
img_name = item[-1].tolist()[0]
label = int(item[4]) - 1
self.samples.append((os.path.join(self.root, self.base_folder_trainims, img_name), label))
else:
labels = \
sio.loadmat(os.path.join(self.root, 'cars_test_annos_withlabels.mat'))['annotations'][0]
for item in labels:
img_name = item[-1].tolist()[0]
label = int(item[-2]) - 1
self.samples.append((os.path.join(self.root, self.base_folder_testims, img_name), label))
class Pets(ImageFolder):
def __init__(self, root, train=True, transform=None, target_transform=None, **kwargs):
self.dataset_root = root
self.loader = default_loader
self.target_transform = None
self.transform = transform
train_list_path = os.path.join(self.dataset_root, 'annotations', 'trainval.txt')
test_list_path = os.path.join(self.dataset_root, 'annotations', 'test.txt')
self.samples = []
if train:
with open(train_list_path, 'r') as f:
for line in f:
img_name = line.split(' ')[0]
label = int(line.split(' ')[1])
self.samples.append((os.path.join(root, 'images', "{}.jpg".format(img_name)), label-1))
else:
with open(test_list_path, 'r') as f:
for line in f:
img_name = line.split(' ')[0]
label = int(line.split(' ')[1])
self.samples.append((os.path.join(root, 'images', "{}.jpg".format(img_name)), label-1))
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
def build_dataset(is_train, args, folder_name=None):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR10':
dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 10
elif args.data_set == 'CIFAR100':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
nb_classes = 100
elif args.data_set == 'CARS':
dataset = Cars196(args.data_path, train=is_train, transform=transform)
nb_classes = 196
elif args.data_set == 'PETS':
dataset = Pets(args.data_path, train=is_train, transform=transform)
nb_classes = 37
elif args.data_set == 'FLOWERS':
dataset = Flowers(args.data_path, train=is_train, transform=transform)
nb_classes = 102
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'EVO_IMNET':
root = os.path.join(args.data_path, folder_name)
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
|
Cream/AutoFormer/lib/datasets.py/0
|
{
"file_path": "Cream/AutoFormer/lib/datasets.py",
"repo_id": "Cream",
"token_count": 4094
}
| 266 |
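A minimal sketch of driving `build_dataset` above with an argparse-style namespace (the field values are illustrative assumptions; CIFAR10 is downloaded on first use):

from argparse import Namespace
from lib.datasets import build_dataset  # import path assumed from the file location

args = Namespace(
    data_set='CIFAR10', data_path='./data', input_size=224,
    color_jitter=0.4, aa='rand-m9-mstd0.5-inc1', train_interpolation='bicubic',
    reprob=0.25, remode='pixel', recount=1, inat_category='name')
dataset_train, nb_classes = build_dataset(is_train=True, args=args)
print(len(dataset_train), nb_classes)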