# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import Counter
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
def __init__(self, args):
super().__init__()
model = torchvision.models.resnet152(pretrained=True)
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
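# Note (added for illustration): POOLING_BREAKDOWN maps the requested number of
# image embeddings N to an (H, W) pooling grid with H * W == N. For example,
# with ``args.num_image_embeds == 4`` the 7x7 ResNet feature map is
# average-pooled to 2x2 and flattened into 4 vectors of size 2048:
#
#     from types import SimpleNamespace
#     encoder = ImageEncoder(SimpleNamespace(num_image_embeds=4))
#     encoder(torch.randn(1, 3, 224, 224)).shape  # torch.Size([1, 4, 2048])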
class JsonlDataset(Dataset):
def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        with open(data_path) as f:
            self.data = [json.loads(line) for line in f]
self.data_dir = os.path.dirname(data_path)
self.tokenizer = tokenizer
self.labels = labels
self.n_classes = len(labels)
self.max_seq_length = max_seq_length
self.transforms = transforms
def __len__(self):
return len(self.data)
def __getitem__(self, index):
sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
sentence = sentence[: self.max_seq_length]
label = torch.zeros(self.n_classes)
label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def get_label_frequencies(self):
label_freqs = Counter()
for row in self.data:
label_freqs.update(row["label"])
return label_freqs
def collate_fn(batch):
lens = [len(row["sentence"]) for row in batch]
bsz, max_seq_len = len(batch), max(lens)
mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
text_tensor[i_batch, :length] = input_row["sentence"]
mask_tensor[i_batch, :length] = 1
img_tensor = torch.stack([row["image"] for row in batch])
tgt_tensor = torch.stack([row["label"] for row in batch])
img_start_token = torch.stack([row["image_start_token"] for row in batch])
img_end_token = torch.stack([row["image_end_token"] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017],
std=[0.12221994, 0.12145835, 0.14380469],
),
]
)
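if __name__ == "__main__":
    # Minimal smoke test (added for illustration; not part of the original
    # utilities). It exercises ``collate_fn`` on two synthetic rows of unequal
    # sentence length and checks the padding / masking behaviour. 23 is
    # ``len(get_mmimdb_labels())``.
    fake_batch = [
        {
            "image_start_token": torch.tensor(101),
            "image_end_token": torch.tensor(102),
            "sentence": torch.LongTensor([5, 6, 7]),
            "image": torch.zeros(3, 224, 224),
            "label": torch.zeros(23),
        },
        {
            "image_start_token": torch.tensor(101),
            "image_end_token": torch.tensor(102),
            "sentence": torch.LongTensor([8, 9]),
            "image": torch.zeros(3, 224, 224),
            "label": torch.zeros(23),
        },
    ]
    text, mask, img, start, end, tgt = collate_fn(fake_batch)
    assert text.shape == (2, 3)  # padded to the longest sentence in the batch
    assert mask[1].tolist() == [1, 1, 0]  # padding positions are masked out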
# ---------------------------------------------------------------------------
# End of file: AdaMix/examples/research_projects/mm-imdb/utils_mmimdb.py
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax.random import PRNGKey
from modeling_flax_performer_utils import make_fast_softmax_attention
from transformers.file_utils import add_start_docstrings
from transformers.modeling_flax_utils import ACT2FN
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.bert.modeling_flax_bert import FlaxBertOnlyMLMHead, FlaxBertPreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads etc.)
    This model is also a Flax Linen :obj:`flax.linen.Module` subclass. Use it as a regular Flax Module and refer to the
    Flax documentation for all matters related to general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class FlaxPerformerLayerNorm(nn.Module):
"""
Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data.
"""
epsilon: float = 1e-6
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
bias: bool = True # If True, bias (beta) is added.
scale: bool = True # If True, multiply by scale (gamma). When the next layer is linear
# (also e.g. nn.relu), this can be disabled since the scaling will be
# done by the next layer.
    bias_init: Callable[..., np.ndarray] = nn.initializers.zeros
    scale_init: Callable[..., np.ndarray] = nn.initializers.ones
@nn.compact
def __call__(self, x):
"""
Applies layer normalization on the input. It normalizes the activations of the layer for each given example in
a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that
maintains the mean activation within each example close to 0 and the activation standard deviation close to 1
Args:
x: the inputs
Returns:
Normalized inputs (the same shape as inputs).
"""
features = x.shape[-1]
mean = jnp.mean(x, axis=-1, keepdims=True)
mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True)
var = mean2 - jax.lax.square(mean)
mul = jax.lax.rsqrt(var + self.epsilon)
if self.scale:
mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype)
y = (x - mean) * mul
if self.bias:
y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype)
return y
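# Quick sanity sketch (added; assumes the default epsilon/bias/scale settings):
#
#     layer = FlaxPerformerLayerNorm()
#     x = jnp.arange(8.0).reshape(2, 4)
#     variables = layer.init(jax.random.PRNGKey(0), x)
#     y = layer.apply(variables, x)  # per-example mean ~0, std ~1 on the last axis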
class FlaxPerformerEmbedding(nn.Module):
"""
    A dedicated embedding class, needed because Flax uses 'embedding' as the parameter name while PyTorch uses
    'weight'; naming the parameter 'weight' here keeps checkpoints compatible with PyTorch.
"""
vocab_size: int
hidden_size: int
emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1)
@nn.compact
def __call__(self, inputs):
embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size))
return jnp.take(embedding, inputs, axis=0)
class FlaxPerformerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
vocab_size: int
hidden_size: int
type_vocab_size: int
max_length: int
@nn.compact
def __call__(self, input_ids, token_type_ids, position_ids, attention_mask):
# Embed
w_emb = FlaxPerformerEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")(
jnp.atleast_2d(input_ids.astype("i4"))
)
p_emb = FlaxPerformerEmbedding(self.max_length, self.hidden_size, name="position_embeddings")(
jnp.atleast_2d(position_ids.astype("i4"))
)
t_emb = FlaxPerformerEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")(
jnp.atleast_2d(token_type_ids.astype("i4"))
)
# Sum all embeddings
summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb
# Layer Norm
layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(summed_emb)
return layer_norm
class FlaxPerformerAttention(nn.Module):
num_heads: int
head_size: int
@nn.compact
def __call__(self, hidden_state, attention_mask):
single_head_dim = self.head_size // self.num_heads
fast_softmax_attention = make_fast_softmax_attention(qkv_dim=single_head_dim)
self_att = nn.attention.SelfAttention(
num_heads=self.num_heads, qkv_features=self.head_size, name="self", attention_fn=fast_softmax_attention
)(hidden_state, attention_mask)
layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(self_att + hidden_state)
return layer_norm
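# Note (added): ``make_fast_softmax_attention`` returns a function with the same
# calling convention as Flax's default dot-product attention, so passing it as
# ``attention_fn`` above swaps exact softmax attention for the Performer FAVOR+
# approximation without changing anything else in the layer.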
class FlaxPerformerIntermediate(nn.Module):
output_size: int
hidden_act: str = "gelu"
@nn.compact
def __call__(self, hidden_state):
dense = nn.Dense(features=self.output_size, name="dense")(hidden_state)
return ACT2FN[self.hidden_act](dense)
class FlaxPerformerOutput(nn.Module):
@nn.compact
def __call__(self, intermediate_output, attention_output):
hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output)
hidden_state = FlaxPerformerLayerNorm(name="layer_norm")(hidden_state + attention_output)
return hidden_state
class FlaxPerformerLayer(nn.Module):
num_heads: int
head_size: int
intermediate_size: int
hidden_act: str = "gelu"
@nn.compact
def __call__(self, hidden_state, attention_mask):
attention = FlaxPerformerAttention(self.num_heads, self.head_size, name="attention")(
hidden_state, attention_mask
)
intermediate = FlaxPerformerIntermediate(
self.intermediate_size, name="intermediate", hidden_act=self.hidden_act
)(attention)
output = FlaxPerformerOutput(name="output")(intermediate, attention)
return output
class FlaxPerformerLayerCollection(nn.Module):
"""
Stores N BertLayer(s)
"""
num_layers: int
num_heads: int
head_size: int
intermediate_size: int
hidden_act: str = "gelu"
@nn.compact
def __call__(self, inputs, attention_mask):
assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})"
# Initialize input / output
input_i = inputs
# Forward over all encoders
for i in range(self.num_layers):
layer = FlaxPerformerLayer(
self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name=f"{i}"
)
input_i = layer(input_i, attention_mask)
return input_i
class FlaxPerformerEncoder(nn.Module):
num_layers: int
num_heads: int
head_size: int
intermediate_size: int
hidden_act: str = "gelu"
@nn.compact
def __call__(self, hidden_state, attention_mask):
layer = FlaxPerformerLayerCollection(
self.num_layers,
self.num_heads,
self.head_size,
self.intermediate_size,
name="layer",
hidden_act=self.hidden_act,
)(hidden_state, attention_mask)
return layer
class FlaxPerformerPooler(nn.Module):
@nn.compact
def __call__(self, hidden_state):
cls_token = hidden_state[:, 0]
out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token)
return jax.lax.tanh(out)
class FlaxPerformerModule(nn.Module):
vocab_size: int
hidden_size: int
type_vocab_size: int
max_length: int
num_encoder_layers: int
num_heads: int
head_size: int
intermediate_size: int
hidden_act: str = "gelu"
add_pooling_layer: bool = True
@nn.compact
def __call__(self, input_ids, token_type_ids, position_ids, attention_mask):
# Embedding
embeddings = FlaxPerformerEmbeddings(
self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings"
)(input_ids, token_type_ids, position_ids, attention_mask)
# N stacked encoding layers
encoder = FlaxPerformerEncoder(
self.num_encoder_layers,
self.num_heads,
self.head_size,
self.intermediate_size,
hidden_act=self.hidden_act,
name="encoder",
)(embeddings, attention_mask)
if not self.add_pooling_layer:
return encoder
pooled = FlaxPerformerPooler(name="pooler")(encoder)
return encoder, pooled
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class FlaxPerformerModel(FlaxBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
model_class = FlaxPerformerModule
config_class = BertConfig
base_model_prefix = "bert"
@staticmethod
def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict:
jax_state = dict(pt_state)
# Need to change some parameters name to match Flax names so that we don't have to fork any layer
for key, tensor in pt_state.items():
# Key parts
key_parts = set(key.split("."))
# Every dense layer has "kernel" parameters instead of "weight"
if "dense.weight" in key:
del jax_state[key]
key = key.replace("weight", "kernel")
jax_state[key] = tensor
# SelfAttention needs also to replace "weight" by "kernel"
if {"query", "key", "value"} & key_parts:
# Flax SelfAttention decomposes the heads (num_head, size // num_heads)
if "bias" in key:
jax_state[key] = tensor.reshape((config.num_attention_heads, -1))
elif "weight":
del jax_state[key]
key = key.replace("weight", "kernel")
tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1))
jax_state[key] = tensor
# SelfAttention output is not a separate layer, remove one nesting
if "attention.output.dense" in key:
del jax_state[key]
key = key.replace("attention.output.dense", "attention.self.out")
jax_state[key] = tensor
# SelfAttention output is not a separate layer, remove nesting on layer norm
if "attention.output.LayerNorm" in key:
del jax_state[key]
key = key.replace("attention.output.LayerNorm", "attention.LayerNorm")
jax_state[key] = tensor
# There are some transposed parameters w.r.t their PyTorch counterpart
if "intermediate.dense.kernel" in key or "output.dense.kernel" in key:
jax_state[key] = tensor.T
# Self Attention output projection needs to be transposed
if "out.kernel" in key:
jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose(
1, 2, 0
)
# Pooler needs to transpose its kernel
if "pooler.dense.kernel" in key:
jax_state[key] = tensor.T
# Handle LayerNorm conversion
if "LayerNorm" in key:
del jax_state[key]
# Replace LayerNorm by layer_norm
new_key = key.replace("LayerNorm", "layer_norm")
if "weight" in key:
new_key = new_key.replace("weight", "gamma")
elif "bias" in key:
new_key = new_key.replace("bias", "beta")
jax_state[new_key] = tensor
return jax_state
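        # Worked example (added): a PyTorch key such as
        # ``encoder.layer.0.attention.output.LayerNorm.weight`` is first renamed to
        # ``encoder.layer.0.attention.LayerNorm.weight`` and then, by the LayerNorm
        # branch above, stored as ``encoder.layer.0.attention.layer_norm.gamma``.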
def __init__(
self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs
):
module = FlaxPerformerModule(
vocab_size=config.vocab_size,
hidden_size=config.hidden_size,
type_vocab_size=config.type_vocab_size,
max_length=config.max_position_embeddings,
num_encoder_layers=config.num_hidden_layers,
num_heads=config.num_attention_heads,
head_size=config.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
@property
def module(self) -> nn.Module:
return self._module
def __call__(
self, input_ids, token_type_ids=None, position_ids=None, dropout_rng: PRNGKey = None, attention_mask=None
):
input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(
input_ids, attention_mask, token_type_ids, position_ids
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
return self.module.apply(
{"params": self.params},
jnp.array(input_ids, dtype="i4"),
jnp.array(token_type_ids, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
            rngs=rngs,
)
class FlaxPerformerForMaskedLM(FlaxBertPreTrainedModel):
def __init__(
self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs
):
module = FlaxPerformerForMaskedLMModule(
vocab_size=config.vocab_size,
type_vocab_size=config.type_vocab_size,
hidden_size=config.hidden_size,
intermediate_size=config.intermediate_size,
head_size=config.hidden_size,
num_heads=config.num_attention_heads,
num_encoder_layers=config.num_hidden_layers,
max_length=config.max_position_embeddings,
hidden_act=config.hidden_act,
**kwargs,
)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def __call__(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
position_ids=None,
params: dict = None,
train: bool = False,
dropout_rng: PRNGKey = None,
):
input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(
input_ids, attention_mask, token_type_ids, position_ids
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
return self.module.apply(
{"params": params or self.params},
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(token_type_ids, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
rngs=rngs,
)
class FlaxPerformerForMaskedLMModule(nn.Module):
vocab_size: int
hidden_size: int
intermediate_size: int
head_size: int
num_heads: int
num_encoder_layers: int
type_vocab_size: int
max_length: int
hidden_act: str
dropout_rate: float = 0.0
dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__(
self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True
):
# Model
encoder = FlaxPerformerModule(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
type_vocab_size=self.type_vocab_size,
max_length=self.max_length,
num_encoder_layers=self.num_encoder_layers,
num_heads=self.num_heads,
head_size=self.hidden_size,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
add_pooling_layer=False,
name="bert",
        )(input_ids, token_type_ids, position_ids, attention_mask)
# Compute the prediction scores
encoder = nn.Dropout(rate=self.dropout_rate)(encoder, deterministic=deterministic)
logits = FlaxBertOnlyMLMHead(
vocab_size=self.vocab_size, hidden_act=self.hidden_act, name="cls", dtype=self.dtype
)(encoder)
return (logits,)
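# Illustrative usage (added sketch; the tiny config values are assumptions made
# purely for the example):
#
#     config = BertConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64, intermediate_size=128)
#     model = FlaxPerformerModel(config)
#     sequence_output, pooled_output = model(jnp.ones((1, 8), dtype="i4"))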
# ---------------------------------------------------------------------------
# End of file: AdaMix/examples/research_projects/performer/modeling_flax_performer.py
# ---------------------------------------------------------------------------
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
"""
    A distributed retriever built on top of the ``torch.distributed`` communication package. During training, all
    workers initialize their own instance of the retriever; however, only the main worker loads the index into memory.
    The index is stored in CPU memory and also works in a non-distributed setup.
    Args:
        config (:class:`~transformers.RagConfig`):
            The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build.
        question_encoder_tokenizer (:class:`~transformers.PretrainedTokenizer`):
            The tokenizer that was used to tokenize the question. It is used to decode the question before re-encoding
            it with the ``generator_tokenizer``.
        generator_tokenizer (:class:`~transformers.PretrainedTokenizer`):
            The tokenizer used for the generator part of the RagModel.
        index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
            If specified, use this index instead of the one built using the configuration.
"""
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
super().__init__(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
index=index,
init_retrieval=False,
)
self.process_group = None
def init_retrieval(self, distributed_port: int):
"""
Retriever initialization function, needs to be called from the training process. The function sets some common parameters
and environment variables. On top of that, (only) the main process in the process group loads the index into memory.
Args:
distributed_port (:obj:`int`):
The port on which the main communication of the training run is carried out. We set the port for retrieval-related
communication as ``distributed_port + 1``.
"""
logger.info("initializing retrieval")
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized")
# needs to be set manually
os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
# avoid clash with the NCCL port
os.environ["MASTER_PORT"] = str(distributed_port + 1)
self.process_group = dist.new_group(ranks=None, backend="gloo")
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main")
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def _is_main(self):
return dist.get_rank(group=self.process_group) == 0
def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
target_tensor = torch.empty(target_shape, dtype=target_type)
dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
return target_tensor
def _infer_socket_ifname(self):
addrs = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
ifname = next((addr for addr in addrs if addr.startswith("e")), None)
return ifname
def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
"""
Retrieves documents for specified ``question_hidden_states``. The main process, which has the access to the index stored in memory, gathers queries
from all the processes in the main training process group, performs the retrieval and scatters back the results.
Args:
question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
n_docs (:obj:`int`):
The number of docs retrieved per query.
        Output:
            retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)`):
                The retrieval embeddings of the retrieved docs per query.
            doc_ids (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`):
                The ids of the documents in the index.
            doc_dicts (:obj:`List[dict]`):
                The retrieved document examples per query.
"""
# single GPU training
if not dist.is_initialized():
doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
# distributed training
world_size = dist.get_world_size(group=self.process_group)
# gather logic
gather_list = None
if self._is_main():
gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
# scatter logic
n_queries = question_hidden_states.shape[0]
scatter_ids = []
scatter_vectors = []
if self._is_main():
assert len(gather_list) == world_size
ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
ids, vectors = torch.tensor(ids), torch.tensor(vectors)
scatter_ids = self._chunk_tensor(ids, n_queries)
scatter_vectors = self._chunk_tensor(vectors, n_queries)
doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
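# Illustrative usage (added sketch; the config/tokenizer objects are assumed to
# come from the surrounding RAG example scripts):
#
#     retriever = RagPyTorchDistributedRetriever(config, question_encoder_tokenizer, generator_tokenizer)
#     retriever.init_retrieval(distributed_port)  # once, after torch.distributed is set up
#     doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)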
# ---------------------------------------------------------------------------
# End of file: AdaMix/examples/research_projects/rag/distributed_pytorch_retriever.py
# ---------------------------------------------------------------------------
import argparse
import logging
import os
import sys
import tempfile
from pathlib import Path
import pytest
import pytorch_lightning as pl
import torch
import lightning_base
from convert_pl_checkpoint_to_hf import convert_pl_to_hf
from distillation import distill_main
from finetune import SummarizationModule, main
from parameterized import parameterized
from run_eval import generate_summaries_or_translations
from transformers import AutoConfig, AutoModelForSeq2SeqLM
from transformers.hf_api import HfApi
from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
from utils import label_smoothed_nll_loss, lmap, load_json
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
CUDA_AVAILABLE = torch.cuda.is_available()
CHEAP_ARGS = {
"max_tokens_per_batch": None,
"supervise_forward": True,
"normalize_hidden": True,
"label_smoothing": 0.2,
"eval_max_gen_length": None,
"eval_beams": 1,
"val_metric": "loss",
"save_top_k": 1,
"adafactor": True,
"early_stopping_patience": 2,
"logger_name": "default",
"length_penalty": 0.5,
"cache_dir": "",
"task": "summarization",
"num_workers": 2,
"alpha_hid": 0,
"freeze_embeds": True,
"enc_only": False,
"tgt_suffix": "",
"resume_from_checkpoint": None,
"sortish_sampler": True,
"student_decoder_layers": 1,
"val_check_interval": 1.0,
"output_dir": "",
"fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
"no_teacher": False,
"fp16_opt_level": "O1",
"gpus": 1 if CUDA_AVAILABLE else 0,
"n_tpu_cores": 0,
"max_grad_norm": 1.0,
"do_train": True,
"do_predict": True,
"accumulate_grad_batches": 1,
"server_ip": "",
"server_port": "",
"seed": 42,
"model_name_or_path": "sshleifer/bart-tiny-random",
"config_name": "",
"tokenizer_name": "facebook/bart-large",
"do_lower_case": False,
"learning_rate": 0.3,
"lr_scheduler": "linear",
"weight_decay": 0.0,
"adam_epsilon": 1e-08,
"warmup_steps": 0,
"max_epochs": 1,
"train_batch_size": 2,
"eval_batch_size": 2,
"max_source_length": 12,
"max_target_length": 12,
"val_max_target_length": 12,
"test_max_target_length": 12,
"fast_dev_run": False,
"no_cache": False,
"n_train": -1,
"n_val": -1,
"n_test": -1,
"student_encoder_layers": 1,
"freeze_encoder": False,
"auto_scale_batch_size": False,
"overwrite_output_dir": False,
"student": None,
}
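# Note (added): CHEAP_ARGS is a complete set of CLI defaults kept deliberately
# small (tiny models, 12-token sequences, a single epoch) so the full training
# loop runs in seconds; tests copy it and override only the fields under test.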
def _dump_articles(path: Path, articles: list):
content = "\n".join(articles)
    Path(path).write_text(content)
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
T5_TINIER = "sshleifer/t5-tinier-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
FSMT_TINY = "stas/tiny-wmt19-en-de"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
def make_test_data_dir(tmp_dir):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
_dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
return tmp_dir
class TestSummarizationDistiller(TestCasePlus):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
return cls
@slow
@require_torch_gpu
def test_hub_configs(self):
"""I put require_torch_gpu cause I only want this to run with self-scheduled."""
model_list = HfApi().model_list()
org = "sshleifer"
model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)]
allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"]
failures = []
for m in model_ids:
if m in allowed_to_be_broken:
continue
try:
AutoConfig.from_pretrained(m)
except Exception:
failures.append(m)
assert not failures, f"The following models could not be loaded through AutoConfig: {failures}"
def test_distill_no_teacher(self):
updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True)
self._test_distiller_cli(updates)
def test_distill_checkpointing_with_teacher(self):
updates = dict(
student_encoder_layers=2,
student_decoder_layers=1,
max_epochs=4,
val_check_interval=0.25,
alpha_hid=2.0,
model_name_or_path="IGNORE_THIS_IT_DOESNT_GET_USED",
)
model = self._test_distiller_cli(updates, check_contents=False)
ckpts = list(Path(model.output_dir).glob("*.ckpt"))
self.assertEqual(1, len(ckpts))
transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin"))
self.assertEqual(len(transformer_ckpts), 2)
examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines())
out_path = tempfile.mktemp() # XXX: not being cleaned up
generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr"))
self.assertTrue(Path(out_path).exists())
out_path_new = self.get_auto_remove_tmp_dir()
convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new)
assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin"))
def test_loss_fn(self):
model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY)
input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"]
target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device)
        decoder_input_ids = target_ids[:, :-1].contiguous()  # decoder input is the target shifted right (last token dropped)
        lm_labels = target_ids[:, 1:].clone()  # clone so the labels don't share storage with decoder_input_ids
model_computed_loss = model(
input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False
).loss
logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits
lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
smoothed_loss, nll_loss = label_smoothed_nll_loss(
lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id
)
with self.assertRaises(AssertionError):
# TODO: understand why this breaks
self.assertEqual(nll_loss, model_computed_loss)
def test_distill_mbart(self):
updates = dict(
student_encoder_layers=2,
student_decoder_layers=1,
num_train_epochs=4,
val_check_interval=0.25,
alpha_hid=2.0,
task="translation",
model_name_or_path="IGNORE_THIS_IT_DOESNT_GET_USED",
tokenizer_name=MBART_TINY,
teacher=MBART_TINY,
src_lang="en_XX",
tgt_lang="ro_RO",
)
model = self._test_distiller_cli(updates, check_contents=False)
assert model.model.config.model_type == "mbart"
ckpts = list(Path(model.output_dir).glob("*.ckpt"))
self.assertEqual(1, len(ckpts))
transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin"))
all_files = list(Path(model.output_dir).glob("best_tfmr/*"))
assert len(all_files) > 2
self.assertEqual(len(transformer_ckpts), 2)
def test_distill_t5(self):
updates = dict(
student_encoder_layers=1,
student_decoder_layers=1,
alpha_hid=2.0,
teacher=T5_TINY,
model_name_or_path=T5_TINY,
tokenizer_name=T5_TINY,
)
self._test_distiller_cli(updates)
def test_distill_different_base_models(self):
updates = dict(
teacher=T5_TINY,
student=T5_TINIER,
model_name_or_path=T5_TINIER,
tokenizer_name=T5_TINIER,
)
self._test_distiller_cli(updates)
def _test_distiller_cli(self, updates, check_contents=True):
default_updates = dict(
label_smoothing=0.0,
early_stopping_patience=-1,
train_batch_size=1,
eval_batch_size=2,
max_epochs=2,
alpha_mlm=0.2,
alpha_ce=0.8,
do_predict=True,
model_name_or_path="sshleifer/tinier_bart",
teacher=CHEAP_ARGS["model_name_or_path"],
val_check_interval=0.5,
)
default_updates.update(updates)
args_d: dict = CHEAP_ARGS.copy()
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
output_dir = self.get_auto_remove_tmp_dir()
args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)
model = distill_main(argparse.Namespace(**args_d))
if not check_contents:
return model
contents = os.listdir(output_dir)
contents = {os.path.basename(p) for p in contents}
ckpt_files = [p for p in contents if p.endswith("ckpt")]
assert len(ckpt_files) > 0
self.assertIn("test_generations.txt", contents)
self.assertIn("test_results.txt", contents)
metrics = load_json(model.metrics_save_path)
last_step_stats = metrics["val"][-1]
self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01)
self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"])
self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1)
self.assertEqual(len(metrics["val"]), desired_n_evals)
self.assertEqual(len(metrics["test"]), 1)
return model
class TestTheRest(TestCasePlus):
@parameterized.expand(
[T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY],
)
def test_finetune(self, model):
args_d: dict = CHEAP_ARGS.copy()
task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization"
args_d["label_smoothing"] = 0.1 if task == "translation" else 0
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
output_dir = self.get_auto_remove_tmp_dir()
args_d.update(
data_dir=tmp_dir,
model_name_or_path=model,
tokenizer_name=None,
train_batch_size=2,
eval_batch_size=2,
output_dir=output_dir,
do_predict=True,
task=task,
src_lang="en_XX",
tgt_lang="ro_RO",
freeze_encoder=True,
freeze_embeds=True,
)
assert "n_train" in args_d
args = argparse.Namespace(**args_d)
module = main(args)
input_embeds = module.model.get_input_embeddings()
assert not input_embeds.weight.requires_grad
if model == T5_TINY:
lm_head = module.model.lm_head
assert not lm_head.weight.requires_grad
assert (lm_head.weight == input_embeds.weight).all().item()
elif model == FSMT_TINY:
fsmt = module.model.model
embed_pos = fsmt.decoder.embed_positions
assert not embed_pos.weight.requires_grad
assert not fsmt.decoder.embed_tokens.weight.requires_grad
# check that embeds are not the same
assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens
else:
bart = module.model.model
embed_pos = bart.decoder.embed_positions
assert not embed_pos.weight.requires_grad
assert not bart.shared.weight.requires_grad
# check that embeds are the same
assert bart.decoder.embed_tokens == bart.encoder.embed_tokens
assert bart.decoder.embed_tokens == bart.shared
example_batch = load_json(module.output_dir / "text_batch.json")
assert isinstance(example_batch, dict)
assert len(example_batch) >= 4
def test_finetune_extra_model_args(self):
args_d: dict = CHEAP_ARGS.copy()
task = "summarization"
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
args_d.update(
data_dir=tmp_dir,
tokenizer_name=None,
train_batch_size=2,
eval_batch_size=2,
do_predict=False,
task=task,
src_lang="en_XX",
tgt_lang="ro_RO",
freeze_encoder=True,
freeze_embeds=True,
)
# test models whose config includes the extra_model_args
model = BART_TINY
output_dir = self.get_auto_remove_tmp_dir()
args_d1 = args_d.copy()
args_d1.update(
model_name_or_path=model,
output_dir=output_dir,
)
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
args_d1[p] = 0.5
args = argparse.Namespace(**args_d1)
model = main(args)
for p in extra_model_params:
assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}"
# test models whose config doesn't include the extra_model_args
model = T5_TINY
output_dir = self.get_auto_remove_tmp_dir()
args_d2 = args_d.copy()
args_d2.update(
model_name_or_path=model,
output_dir=output_dir,
)
unsupported_param = "encoder_layerdrop"
args_d2[unsupported_param] = 0.5
args = argparse.Namespace(**args_d2)
with pytest.raises(Exception) as excinfo:
model = main(args)
assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute"
def test_finetune_lr_schedulers(self):
args_d: dict = CHEAP_ARGS.copy()
task = "summarization"
tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
model = BART_TINY
output_dir = self.get_auto_remove_tmp_dir()
args_d.update(
data_dir=tmp_dir,
model_name_or_path=model,
output_dir=output_dir,
tokenizer_name=None,
train_batch_size=2,
eval_batch_size=2,
do_predict=False,
task=task,
src_lang="en_XX",
tgt_lang="ro_RO",
freeze_encoder=True,
freeze_embeds=True,
)
# emulate finetune.py
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = {"--help": True}
# --help test
with pytest.raises(SystemExit) as excinfo:
with CaptureStdout() as cs:
args = parser.parse_args(args)
assert False, "--help is expected to sys.exit"
assert excinfo.type == SystemExit
expected = lightning_base.arg_to_scheduler_metavar
assert expected in cs.out, "--help is expected to list the supported schedulers"
# --lr_scheduler=non_existing_scheduler test
unsupported_param = "non_existing_scheduler"
args = {f"--lr_scheduler={unsupported_param}"}
with pytest.raises(SystemExit) as excinfo:
with CaptureStderr() as cs:
args = parser.parse_args(args)
assert False, "invalid argument is expected to sys.exit"
assert excinfo.type == SystemExit
expected = f"invalid choice: '{unsupported_param}'"
assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}"
# --lr_scheduler=existing_scheduler test
supported_param = "cosine"
args_d1 = args_d.copy()
args_d1["lr_scheduler"] = supported_param
args = argparse.Namespace(**args_d1)
model = main(args)
assert (
getattr(model.hparams, "lr_scheduler") == supported_param
), f"lr_scheduler={supported_param} shouldn't fail"
|
AdaMix/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py/0
|
{
"file_path": "AdaMix/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py",
"repo_id": "AdaMix",
"token_count": 7867
}
| 42 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.4.0")
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
summary_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (rouge) on "
"(a jsonlines or csv file)."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on " "(a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
max_test_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of test examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
summarization_name_mapping = {
"amazon_reviews_multi": ("review_body", "review_title"),
"big_patent": ("description", "abstract"),
"cnn_dailymail": ("article", "highlights"),
"orange_sum": ("text", "summary"),
"pn_summary": ("article", "summary"),
"psc": ("extract_text", "summary_text"),
"samsum": ("dialogue", "summary"),
"thaisum": ("body", "summary"),
"xglue": ("news_body", "news_title"),
"xsum": ("document", "summary"),
"wiki_summary": ("article", "highlights"),
}
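# Example (added note): with ``--dataset_name xsum`` the mapping above resolves to
# ``("document", "summary")``, i.e. the article text is read from the ``document``
# column and the reference summary from the ``summary`` column, unless
# ``--text_column`` / ``--summary_column`` override it.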
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = datasets["train"].column_names
elif training_args.do_eval:
column_names = datasets["validation"].column_names
elif training_args.do_predict:
column_names = datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = data_args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = data_args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
        logger.warning(
            "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
            f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[summary_column]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
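# Illustrative sketch of what `preprocess_function` returns for a hypothetical batch
# (token ids omitted; most tokenizers produce `input_ids` and `attention_mask`, and
# `labels` is added above):
#
#   >>> out = preprocess_function({text_column: ["a long article ..."], summary_column: ["a summary"]})
#   >>> sorted(out.keys())
#   ['attention_mask', 'input_ids', 'labels']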
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
test_dataset = test_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
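# Note: `pad_to_multiple_of=8` aligns sequence lengths with NVIDIA Tensor Core tile
# sizes, which speeds up fp16 matrix multiplications without affecting results.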
# Metric
metric = load_metric("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
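# Note: `nltk.sent_tokenize` requires the "punkt" tokenizer data; if it is missing,
# download it once with `nltk.download("punkt")`.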
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
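# Illustrative output of `compute_metrics` (all numbers made up):
#   {'rouge1': 42.13, 'rouge2': 20.05, 'rougeL': 39.87, 'rougeLsum': 39.91, 'gen_len': 18.5}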
# Initialize our Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval"
)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Test ***")
test_results = trainer.predict(
test_dataset,
metric_key_prefix="test",
max_length=data_args.val_max_target_length,
num_beams=data_args.num_beams,
)
metrics = test_results.metrics
max_test_samples = data_args.max_test_samples if data_args.max_test_samples is not None else len(test_dataset)
metrics["test_samples"] = min(max_test_samples, len(test_dataset))
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
test_preds = tokenizer.batch_decode(
test_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
test_preds = [pred.strip() for pred in test_preds]
output_test_preds_file = os.path.join(training_args.output_dir, "test_generations.txt")
with open(output_test_preds_file, "w") as writer:
writer.write("\n".join(test_preds))
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
AdaMix/examples/seq2seq/run_summarization.py/0
|
{
"file_path": "AdaMix/examples/seq2seq/run_summarization.py",
"repo_id": "AdaMix",
"token_count": 10001
}
| 43 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
Adapted from `examples/text-classification/run_glue.py`"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.4.0")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
max_test_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of test examples to this "
"value if set."
},
)
server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
language: str = field(
default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
)
train_language: Optional[str] = field(
default=None, metadata={"help": "Train language if it is different from the evaluation language."}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
do_lower_case: Optional[bool] = field(
default=False,
metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup distant debugging if needed
if data_args.server_ip and data_args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if model_args.train_language is None:
train_dataset = load_dataset("xnli", model_args.language, split="train")
else:
train_dataset = load_dataset("xnli", model_args.train_language, split="train")
eval_dataset = load_dataset("xnli", model_args.language, split="validation")
# Labels
label_list = train_dataset.features["label"].names
num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task="xnli",
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
do_lower_case=model_args.do_lower_case,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
def preprocess_function(examples):
# Tokenize the texts
return tokenizer(
examples["premise"],
examples["hypothesis"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
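# Illustrative sketch (hypothetical inputs): each premise/hypothesis pair is encoded
# as a single sequence, with the two segments joined by the tokenizer's separator token:
#
#   >>> preprocess_function({"premise": ["A man is eating."], "hypothesis": ["A person eats."]})
#   {'input_ids': [[...]], 'attention_mask': [[...]], ...}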
if training_args.do_train:
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
metric = load_metric("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.argmax(preds, axis=1)
return metric.compute(predictions=preds, references=p.label_ids)
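# The "xnli" metric reduces to plain accuracy, so `compute_metrics` returns a
# dictionary of the form {'accuracy': 0.85} (value illustrative).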
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if __name__ == "__main__":
main()
|
AdaMix/examples/text-classification/run_xnli.py/0
|
{
"file_path": "AdaMix/examples/text-classification/run_xnli.py",
"repo_id": "AdaMix",
"token_count": 5328
}
| 44 |
#!/usr/bin/env bash
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script acquires the data and converts it to the fsmt model format
# it covers:
# - facebook/wmt19-ru-en
# - facebook/wmt19-en-ru
# - facebook/wmt19-de-en
# - facebook/wmt19-en-de
# this script needs to be run from the top level of the transformers repo
if [ ! -d "src/transformers" ]; then
echo "Error: This script needs to be run from the top of the transformers repo"
exit 1
fi
mkdir data
# get data (run once)
cd data
wget https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz
wget https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz
wget https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz
wget https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz
tar -xvzf wmt19.en-de.joined-dict.ensemble.tar.gz
tar -xvzf wmt19.de-en.joined-dict.ensemble.tar.gz
tar -xvzf wmt19.en-ru.ensemble.tar.gz
tar -xvzf wmt19.ru-en.ensemble.tar.gz
cd -
# run conversions and uploads
export PAIR=ru-en
PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19.$PAIR.ensemble/model4.pt --pytorch_dump_folder_path data/wmt19-$PAIR
export PAIR=en-ru
PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19.$PAIR.ensemble/model4.pt --pytorch_dump_folder_path data/wmt19-$PAIR
export PAIR=de-en
PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19.$PAIR.joined-dict.ensemble/model4.pt --pytorch_dump_folder_path data/wmt19-$PAIR
export PAIR=en-de
PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19.$PAIR.joined-dict.ensemble/model4.pt --pytorch_dump_folder_path data/wmt19-$PAIR
# upload
cd data
transformers-cli upload -y wmt19-ru-en
transformers-cli upload -y wmt19-en-ru
transformers-cli upload -y wmt19-de-en
transformers-cli upload -y wmt19-en-de
cd -
# if updating just small files and not the large models, here is a script to generate the right commands:
perl -le 'for $f (@ARGV) { print qq[transformers-cli upload -y $_/$f --filename $_/$f] for map { "wmt19-$_" } ("en-ru", "ru-en", "de-en", "en-de")}' vocab-src.json vocab-tgt.json tokenizer_config.json config.json
# add/remove files as needed
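# e.g. for vocab-src.json the perl loop above prints commands such as:
#   transformers-cli upload -y wmt19-en-ru/vocab-src.json --filename wmt19-en-ru/vocab-src.json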
|
AdaMix/scripts/fsmt/convert-facebook-wmt19.sh/0
|
{
"file_path": "AdaMix/scripts/fsmt/convert-facebook-wmt19.sh",
"repo_id": "AdaMix",
"token_count": 1121
}
| 45 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple checklist from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the
documentation.
2. Unpin specific versions from setup.py that use a git install.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
6. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggests using twine, as other methods upload files via plaintext.)
You may have to specify the repository URL; in that case, use the following command:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
7. Upload the final version to actual pypi:
twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
9. Run `make post-release` (or `make post-patch` for a patch release).
"""
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"black>=20.8b1",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.3.2",
"fugashi>=1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.59",
"keras2onnx",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"packaging",
"parameterized",
"protobuf",
"psutil",
"pydantic",
"pytest",
"pytest-sugar",
"pytest-xdist",
"python>=3.6.0",
"recommonmark",
"regex!=2019.12.17",
"requests",
"sacremoses",
"scikit-learn",
"sentencepiece==0.1.91",
"soundfile",
"sphinx-copybutton",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style.
"sphinx==3.2.1",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"timeout-decorator",
"tokenizers>=0.10.1,<0.11",
"torch>=1.0",
"torchaudio",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>]+)(?:[!=<>].*)?$)", x)[0] for x in _deps)}
# since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
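# e.g. deps_list("torch", "numpy") -> ["torch>=1.0", "numpy>=1.17"], i.e. each package
# name is resolved to its (possibly pinned) entry from `_deps` above.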
class DepsTableUpdateCommand(Command):
"""
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
"""
description = "build runtime dependency table"
user_options = [
# format: (long option, short option, description).
("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
content = [
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
"# 1. modify the `_deps` dict in setup.py",
"# 2. run `make deps_table_update``",
"deps = {",
entries,
"}",
"",
]
target = "src/transformers/dependency_versions_table.py"
print(f"updating {target}")
with open(target, "w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(content))
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "keras2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "keras2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = deps_list("faiss-cpu", "datasets")
extras["flax"] = deps_list("jax", "jaxlib", "flax")
extras["tokenizers"] = deps_list("tokenizers")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["onnx"] = deps_list("onnxconverter-common", "keras2onnx") + extras["onnxruntime"]
extras["modelcreation"] = deps_list("cookiecutter")
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["speech"] = deps_list("soundfile", "torchaudio")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list("pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets", "pytest-sugar")
+ extras["retrieval"]
+ extras["modelcreation"]
)
extras["docs"] = deps_list("recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme", "sphinx-copybutton")
extras["quality"] = deps_list("black", "isort", "flake8")
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
extras["dev"] = (
extras["all"]
+ extras["testing"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs"]
+ extras["sklearn"]
+ extras["modelcreation"]
)
extras["torchhub"] = deps_list(
"filelock",
"importlib_metadata",
"numpy",
"packaging",
"protobuf",
"regex",
"requests",
"sacremoses",
"sentencepiece",
"torch",
"tokenizers",
"tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["importlib_metadata"] + ";python_version<'3.8'", # importlib_metadata for Python versions that don't have it
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["numpy"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["sacremoses"], # for XLM
deps["tokenizers"],
deps["tqdm"], # progress bars in model download and training scripts
]
setup(
name="transformers",
version="4.4.2", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="[email protected]",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
|
AdaMix/setup.py/0
|
{
"file_path": "AdaMix/setup.py",
"repo_id": "AdaMix",
"token_count": 4252
}
| 46 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from ..pipelines import SUPPORTED_TASKS, Pipeline, PipelineDataFormat, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(ext):
return ext
raise Exception(
"Unable to determine file format from file extension {}. "
"Please provide the format through --format {}".format(path, PipelineDataFormat.SUPPORTED_FORMATS)
)
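# Illustrative behavior (assuming "csv" is one of PipelineDataFormat.SUPPORTED_FORMATS):
#   try_infer_format_from_ext("data.csv") -> "csv"
#   try_infer_format_from_ext("")         -> "pipe"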
def run_command_factory(args):
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
reader = PipelineDataFormat.from_str(
format=format,
output_path=args.output,
input_path=args.input,
column=args.column if args.column else nlp.default_input_names,
overwrite=args.overwrite,
)
return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
self._nlp = nlp
self._reader = reader
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
run_parser.add_argument("--task", choices=SUPPORTED_TASKS.keys(), help="Task to run")
run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
run_parser.add_argument(
"--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
)
run_parser.add_argument(
"--column",
type=str,
help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
)
run_parser.add_argument(
"--format",
type=str,
default="infer",
choices=PipelineDataFormat.SUPPORTED_FORMATS,
help="Input format to read from",
)
run_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
run_parser.set_defaults(func=run_command_factory)
def run(self):
nlp, outputs = self._nlp, []
for entry in self._reader:
output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
if isinstance(output, dict):
outputs.append(output)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
binary_path = self._reader.save_binary(outputs)
logger.warning("Current pipeline requires output to be in binary format, saving at {}".format(binary_path))
else:
self._reader.save(outputs)
|
AdaMix/src/transformers/commands/run.py/0
|
{
"file_path": "AdaMix/src/transformers/commands/run.py",
"repo_id": "AdaMix",
"token_count": 1668
}
| 47 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from torch.utils.data.dataset import Dataset
from filelock import FileLock
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
line.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
self.task_name = self.task_name.lower()
class Split(Enum):
train = "train"
dev = "dev"
test = "test"
class GlueDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__(
self,
args: GlueDataTrainingArguments,
tokenizer: PreTrainedTokenizerBase,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
cache_dir: Optional[str] = None,
):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py",
FutureWarning,
)
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,
"cached_{}_{}_{}_{}".format(
mode.value,
tokenizer.__class__.__name__,
str(args.max_seq_length),
args.task_name,
),
)
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
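# e.g. for MNLI the processor returns ["contradiction", "entailment", "neutral"]; after
# the swap the order is ["contradiction", "neutral", "entailment"], matching the label
# indices used by the RoBERTa pretrained checkpoints.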
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file)
logger.info(
"Loading features from cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {args.data_dir}")
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples,
tokenizer,
max_length=args.max_seq_length,
label_list=label_list,
output_mode=self.output_mode,
)
start = time.time()
torch.save(self.features, cached_features_file)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
|
AdaMix/src/transformers/data/datasets/glue.py/0
|
{
"file_path": "AdaMix/src/transformers/data/datasets/glue.py",
"repo_id": "AdaMix",
"token_count": 2636
}
| 48 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import timeout_decorator
from ..file_utils import cached_property, is_torch_available
from ..testing_utils import require_torch
if is_torch_available():
import torch
from ..models.marian import MarianConfig, MarianMTModel
@require_torch
class GenerationUtilsTest(unittest.TestCase):
@cached_property
def config(self):
config = MarianConfig.from_pretrained("sshleifer/tiny-marian-en-de")
return config
@cached_property
def model(self):
return MarianMTModel(self.config)
def test_postprocess_next_token_scores(self):
config = self.config
model = self.model
# Initialize an input id tensor with batch size 8 and sequence length 12
input_ids = torch.arange(0, 96, 1).view((8, 12))
eos = config.eos_token_id
bad_words_ids_test_cases = [[[299]], [[23, 24], [54]], [[config.eos_token_id]], []]
masked_scores = [
[(0, 299), (1, 299), (2, 299), (3, 299), (4, 299), (5, 299), (6, 299), (7, 299)],
[(1, 24), (0, 54), (1, 54), (2, 54), (3, 54), (4, 54), (5, 54), (6, 54), (7, 54)],
[(0, eos), (1, eos), (2, eos), (3, eos), (4, eos), (5, eos), (6, eos), (7, eos)],
[],
]
for test_case_index, bad_words_ids in enumerate(bad_words_ids_test_cases):
# Initialize a scores tensor with batch size 8 and vocabulary size 300
scores = torch.rand((8, 300))
output = model.postprocess_next_token_scores(
scores,
input_ids,
0,
bad_words_ids,
13,
15,
config.max_length,
config.eos_token_id,
config.repetition_penalty,
32,
5,
)
for masked_score in masked_scores[test_case_index]:
self.assertTrue(output[masked_score[0], masked_score[1]] == -float("inf"))
@timeout_decorator.timeout(10)
def test_postprocess_next_token_scores_large_bad_words_list(self):
config = self.config
model = self.model
# Initialize an input id tensor with batch size 8 and sequence length 12
input_ids = torch.arange(0, 96, 1).view((8, 12))
bad_words_ids = []
for _ in range(100):
length_bad_word = random.randint(1, 4)
bad_words_ids.append(random.sample(range(1, 300), length_bad_word))
scores = torch.rand((8, 300))
_ = model.postprocess_next_token_scores(
scores,
input_ids,
0,
bad_words_ids,
13,
15,
config.max_length,
config.eos_token_id,
config.repetition_penalty,
32,
5,
)
|
AdaMix/src/transformers/data/test_generation_utils.py/0
|
{
"file_path": "AdaMix/src/transformers/data/test_generation_utils.py",
"repo_id": "AdaMix",
"token_count": 1576
}
| 49 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from .file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
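# A minimal, self-contained sketch (shapes and layer count are illustrative
# assumptions, not tied to any checkpoint) of the `past_key_values` layout
# documented above for a decoder-only model: one (key, value) tensor pair per layer.
import torch

n_layers, bsz, heads, seq_len, head_dim = 2, 1, 4, 5, 8
past_key_values = tuple(
    (
        torch.randn(bsz, heads, seq_len, head_dim),  # cached keys for one layer
        torch.randn(bsz, heads, seq_len, head_dim),  # cached values for one layer
    )
    for _ in range(n_layers)
)
# Reusing this cache lets the next forward pass feed only the newest token
# instead of re-encoding the whole prefix.
assert len(past_key_values) == n_layers
assert past_key_values[0][0].shape == (bsz, heads, seq_len, head_dim)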
@dataclass
class Seq2SeqModelOutput(ModelOutput):
"""
    Base class for sequence-to-sequence model outputs that also contain pre-computed hidden states that can speed up
    sequential decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutputWithCrossAttentions(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the
            cached key and value states of the self-attention and cross-attention layers if the model is used in an
            encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
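# A companion sketch for the encoder-decoder cache described above (shapes are
# illustrative assumptions): each layer's tuple holds 2 self-attention tensors,
# which grow with the decoded sequence, plus 2 cross-attention tensors, which
# stay fixed at the encoder sequence length.
import torch

bsz, heads, dec_len, enc_len, head_dim = 1, 4, 3, 10, 8
layer_cache = (
    torch.randn(bsz, heads, dec_len, head_dim),  # self-attention keys
    torch.randn(bsz, heads, dec_len, head_dim),  # self-attention values
    torch.randn(bsz, heads, enc_len, head_dim),  # cross-attention keys (static)
    torch.randn(bsz, heads, enc_len, head_dim),  # cross-attention values (static)
)
assert len(layer_cache) == 4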
@dataclass
class NextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
Next sequence prediction (classification) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            Classification scores (before SoftMax). :obj:`num_choices` is the second dimension of the input
            tensors (see :obj:`input_ids` above).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class TokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class QuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span-extraction loss: the sum of a cross-entropy loss for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span-extraction loss: the sum of a cross-entropy loss for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
            of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
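# A small usage sketch, assuming the standard ModelOutput behavior where fields
# left at None are omitted from the underlying dict: outputs can be read by
# attribute name, and to_tuple() exposes only the populated fields.
import torch

out = SequenceClassifierOutput(logits=torch.zeros(2, 3))
assert out.logits.shape == (2, 3)  # attribute access
assert len(out.to_tuple()) == 1    # loss/hidden_states/attentions were not set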
|
AdaMix/src/transformers/modeling_outputs.py/0
|
{
"file_path": "AdaMix/src/transformers/modeling_outputs.py",
"repo_id": "AdaMix",
"token_count": 19277
}
| 50 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ALBERT model configuration """
from ...configuration_utils import PretrainedConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.AlbertModel` or a
:class:`~transformers.TFAlbertModel`. It is used to instantiate an ALBERT model according to the specified
arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar
configuration to that of the ALBERT `xxlarge <https://huggingface.co/albert-xxlarge-v2>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30000):
Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.AlbertModel` or
:class:`~transformers.TFAlbertModel`.
embedding_size (:obj:`int`, `optional`, defaults to 128):
Dimensionality of vocabulary embeddings.
hidden_size (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_hidden_groups (:obj:`int`, `optional`, defaults to 1):
Number of groups for the hidden layers, parameters in the same group are shared.
num_attention_heads (:obj:`int`, `optional`, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 16384):
The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
inner_group_num (:obj:`int`, `optional`, defaults to 1):
            The number of inner repetitions of the attention and feed-forward (FFN) blocks.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu_new"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
(e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.AlbertModel` or
:class:`~transformers.TFAlbertModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
classifier_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for attached classifiers.
position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`):
Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`,
:obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on
:obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.)
<https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please refer to
`Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)
<https://arxiv.org/abs/2009.13658>`__.
Examples::
>>> from transformers import AlbertConfig, AlbertModel
>>> # Initializing an ALBERT-xxlarge style configuration
>>> albert_xxlarge_configuration = AlbertConfig()
>>> # Initializing an ALBERT-base style configuration
>>> albert_base_configuration = AlbertConfig(
... hidden_size=768,
... num_attention_heads=12,
... intermediate_size=3072,
... )
        >>> # Initializing a model from the ALBERT-base style configuration
        >>> model = AlbertModel(albert_base_configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "albert"
def __init__(
self,
vocab_size=30000,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
hidden_act="gelu_new",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
classifier_dropout_prob=0.1,
position_embedding_type="absolute",
pad_token_id=0,
bos_token_id=2,
eos_token_id=3,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout_prob = classifier_dropout_prob
self.position_embedding_type = position_embedding_type
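# A short round-trip sketch using only the standard PretrainedConfig API:
# serialize the configuration to a plain dict and rebuild an equivalent object.
config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
restored = AlbertConfig.from_dict(config.to_dict())
assert restored.hidden_size == 768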
|
AdaMix/src/transformers/models/albert/configuration_albert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/albert/configuration_albert.py",
"repo_id": "AdaMix",
"token_count": 3191
}
| 51 |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while preserving the other warnings, so don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _BaseLazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bart": ["BART_PRETRAINED_CONFIG_ARCHIVE_MAP", "BartConfig"],
"tokenization_bart": ["BartTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_bart_fast"] = ["BartTokenizerFast"]
if is_torch_available():
_import_structure["modeling_bart"] = [
"BART_PRETRAINED_MODEL_ARCHIVE_LIST",
"BartForCausalLM",
"BartForConditionalGeneration",
"BartForQuestionAnswering",
"BartForSequenceClassification",
"BartModel",
"BartPretrainedModel",
"PretrainedBartModel",
]
if is_tf_available():
_import_structure["modeling_tf_bart"] = ["TFBartForConditionalGeneration", "TFBartModel", "TFBartPretrainedModel"]
if TYPE_CHECKING:
from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig
from .tokenization_bart import BartTokenizer
if is_tokenizers_available():
from .tokenization_bart_fast import BartTokenizerFast
if is_torch_available():
from .modeling_bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BartForCausalLM,
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
BartPretrainedModel,
PretrainedBartModel,
)
if is_tf_available():
from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel, TFBartPretrainedModel
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
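# Usage sketch (illustrative; assumes this package is importable as `transformers`):
# importing the package is cheap, and the first attribute access triggers the real
# import through _LazyModule._get_module, e.g.
#
#     from transformers.models.bart import BartConfig  # loads configuration_bart lazily
#     config = BartConfig()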
|
AdaMix/src/transformers/models/bart/__init__.py/0
|
{
"file_path": "AdaMix/src/transformers/models/bart/__init__.py",
"repo_id": "AdaMix",
"token_count": 1095
}
| 52 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for Bert."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt",
"bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt",
"TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt",
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json",
"bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json",
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json",
"TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json",
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library). Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see `this
issue <https://github.com/huggingface/transformers/issues/328>`__).
        strip_accents (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
        wordpieces_prefix (:obj:`str`, `optional`, defaults to :obj:`"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BertTokenizer
def __init__(
self,
vocab_file,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
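        # Keep the serialized normalizer in sync with the arguments passed in: if
        # the tokenizer.json on disk was saved with different lowercasing or
        # accent-stripping settings, rebuild the normalizer so the explicit
        # arguments given here take precedence.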
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("do_lower_case", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["do_lower_case"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
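# A short usage sketch (guarded so it only runs when executed directly; the
# checkpoint name is an illustrative assumption and fetching it needs network access):
if __name__ == "__main__":
    tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    ids_a = tok.convert_tokens_to_ids(["hello", "world"])
    ids_b = tok.convert_tokens_to_ids(["again"])
    print(tok.build_inputs_with_special_tokens(ids_a, ids_b))      # [CLS] A [SEP] B [SEP]
    print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # [0, 0, 0, 0, 1, 1]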
|
AdaMix/src/transformers/models/bert/tokenization_bert_fast.py/0
|
{
"file_path": "AdaMix/src/transformers/models/bert/tokenization_bert_fast.py",
"repo_id": "AdaMix",
"token_count": 6104
}
| 53 |
# coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for BERTweet """
import html
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
import regex
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"vinai/bertweet-base": 128,
}
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
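# Editor's illustrative sketch (not part of the original file): get_pairs over
# the word representation used by BertweetTokenizer.bpe, where the last symbol
# carries the "</w>" end-of-word marker.
def _example_get_pairs():
    word = ("l", "o", "w", "er</w>")
    assert get_pairs(word) == {("l", "o"), ("o", "w"), ("w", "er</w>")}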
class BertweetTokenizer(PreTrainedTokenizer):
"""
Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
        normalization (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to apply a normalization preprocess.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
merges_file,
normalization=False,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
super().__init__(
normalization=normalization,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
try:
from emoji import demojize
self.demojizer = demojize
except ImportError:
logger.warning(
"emoji is not installed, thus not converting emoticons or emojis into text. Please install emoji: pip3 install emoji"
)
self.demojizer = None
self.vocab_file = vocab_file
self.merges_file = merges_file
self.encoder = {}
self.encoder[self.bos_token] = 0
self.encoder[self.pad_token] = 1
self.encoder[self.eos_token] = 2
self.encoder[self.unk_token] = 3
self.add_from_file(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:-1]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.normalization = normalization
self.tweetPreprocessor = TweetTokenizer()
self.special_puncts = {"’": "'", "…": "..."}
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A BERTweet sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = "@@ ".join(word)
word = word[:-4]
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
if self.normalization: # Perform Tweet normalization before performing BPE
text = self.normalizeTweet(text)
split_tokens = []
words = re.findall(r"\S+\n?", text)
for token in words:
split_tokens.extend([t for t in self.bpe(token).split(" ")])
return split_tokens
def normalizeTweet(self, tweet):
"""
Normalize a raw Tweet
"""
for punct in self.special_puncts:
tweet = tweet.replace(punct, self.special_puncts[punct])
tokens = self.tweetPreprocessor.tokenize(tweet)
normTweet = " ".join([self.normalizeToken(token) for token in tokens])
normTweet = (
normTweet.replace("cannot ", "can not ")
.replace("n't ", " n't ")
.replace("n 't ", " n't ")
.replace("ca n't", "can't")
.replace("ai n't", "ain't")
)
normTweet = (
normTweet.replace("'m ", " 'm ")
.replace("'re ", " 're ")
.replace("'s ", " 's ")
.replace("'ll ", " 'll ")
.replace("'d ", " 'd ")
.replace("'ve ", " 've ")
)
normTweet = (
normTweet.replace(" p . m .", " p.m.")
.replace(" p . m ", " p.m ")
.replace(" a . m .", " a.m.")
.replace(" a . m ", " a.m ")
)
return " ".join(normTweet.split())
def normalizeToken(self, token):
"""
Normalize tokens in a Tweet
"""
lowercased_token = token.lower()
if token.startswith("@"):
return "@USER"
elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
return "HTTPURL"
elif len(token) == 1:
if token in self.special_puncts:
return self.special_puncts[token]
if self.demojizer is not None:
return self.demojizer(token)
else:
return token
else:
return token
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = " ".join(tokens).replace("@@ ", "").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
out_merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
copyfile(self.merges_file, out_merge_file)
return out_vocab_file, out_merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
"""
if isinstance(f, str):
try:
with open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please " "rebuild the dataset".format(f))
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
word = line[:idx]
self.encoder[word] = len(self.encoder)
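# Editor's hedged usage sketch (not part of the original file): exercises the
# tokenizer end to end. from_pretrained fetches the vocab.txt/bpe.codes files
# referenced in PRETRAINED_VOCAB_FILES_MAP, so this needs network access.
def _example_bertweet_tokenizer():
    tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
    tokens = tokenizer.tokenize("@remy SO COOOOL :-) https://example.com")
    # normalizeTweet maps the user handle to @USER and the link to HTTPURL
    ids = tokenizer.convert_tokens_to_ids(tokens)
    return tokenizer.build_inputs_with_special_tokens(ids)  # <s> ... </s>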
# Natural Language Toolkit: Twitter Tokenizer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Christopher Potts <[email protected]>
# Ewan Klein <[email protected]> (modifications)
# Pierpaolo Pantone <> (modifications)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
1. The tuple regex_strings defines a list of regular expression strings.
2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
the class TweetTokenizer.
4. When instantiating TweetTokenizer objects, there are three options: preserve_case, reduce_len, and strip_handles. By
default, preserve_case is set to True. If it is set to False, then the tokenizer will downcase everything except for
emoticons.
"""
######################################################################
#
# import regex # https://github.com/nltk/nltk/issues/2409
# import html
#
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
# This particular element is used in a couple ways, so we define it
# with a name:
# docstyle-ignore
EMOTICONS = r"""
(?:
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
|
<3 # heart
)"""
# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
# docstyle-ignore
URLS = r""" # Capture 1: entire matched URL
(?:
https?: # URL protocol and colon
(?:
/{1,3} # 1-3 slashes
| # or
[a-z0-9%] # Single letter or digit or '%'
# (Trying not to match e.g. "URI::Escape")
)
| # or
# looks like domain name followed by a slash:
[a-z0-9.\-]+[.]
(?:[a-z]{2,13})
/
)
(?: # One or more:
[^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
| # or
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
)+
(?: # End with:
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
| # or
[^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
)
| # OR, the following to match naked domains:
(?:
(?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
[a-z0-9]+
(?:[.\-][a-z0-9]+)*
[.]
(?:[a-z]{2,13})
\b
/?
(?!@) # not succeeded by a @,
# avoid matching "foo.na" in "[email protected]"
)
"""
# docstyle-ignore
# The components of the tokenizer:
REGEXPS = (
URLS,
# Phone numbers:
r"""
(?:
(?: # (international)
\+?[01]
[ *\-.\)]*
)?
(?: # (area code)
[\(]?
\d{3}
[ *\-.\)]*
)?
\d{3} # exchange
[ *\-.\)]*
\d{4} # base
)""",
# ASCII Emoticons
EMOTICONS,
# HTML tags:
r"""<[^>\s]+>""",
# ASCII Arrows
r"""[\-]+>|<[\-]+""",
# Twitter username:
r"""(?:@[\w_]+)""",
# Twitter hashtags:
r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
# email addresses
r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
# docstyle-ignore
# Remaining word types:
r"""
(?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
|
(?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
|
(?:[\w_]+) # Words without apostrophes or dashes.
|
(?:\.(?:\s*\.){1,}) # Ellipsis dots.
|
(?:\S) # Everything else that isn't whitespace.
""",
)
######################################################################
# This is the core tokenizing regex:
WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
# WORD_RE performs poorly on these patterns:
HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
# The emoticon string gets its own regex so that we can preserve case for
# them as needed:
EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
# These are for regularizing HTML entities to Unicode:
ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
######################################################################
# Functions for converting html entities
######################################################################
def _str_to_unicode(text, encoding=None, errors="strict"):
if encoding is None:
encoding = "utf-8"
if isinstance(text, bytes):
return text.decode(encoding, errors)
return text
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
"""
Remove entities from text by converting them to their corresponding unicode character.
Args:
text:
A unicode string or a byte string encoded in the given `encoding` (which defaults to 'utf-8').
keep (list):
List of entity names which should not be replaced. This supports both numeric entities (``&#nnnn;`` and
            ``&#hhhh;``) and named entities (such as ``&nbsp;`` or ``&gt;``).
remove_illegal (bool):
If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
kept "as is".
Returns: A unicode string with the entities removed.
See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
    >>> from nltk.tokenize.casual import _replace_html_entities
    >>> _replace_html_entities(b'Price: &pound;100')
    'Price: \\xa3100'
    >>> print(_replace_html_entities(b'Price: &pound;100'))
    Price: £100
"""
def _convert_entity(match):
entity_body = match.group(3)
if match.group(1):
try:
if match.group(2):
number = int(entity_body, 16)
else:
number = int(entity_body, 10)
# Numeric character references in the 80-9F range are typically
# interpreted by browsers as representing the characters mapped
# to bytes 80-9F in the Windows-1252 encoding. For more info
# see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
if 0x80 <= number <= 0x9F:
return bytes((number,)).decode("cp1252")
except ValueError:
number = None
else:
if entity_body in keep:
return match.group(0)
else:
number = html.entities.name2codepoint.get(entity_body)
if number is not None:
try:
return chr(number)
except (ValueError, OverflowError):
pass
return "" if remove_illegal else match.group(0)
return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
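# Editor's illustrative sketch (not part of the original file): named, decimal,
# and hexadecimal references all collapse to the corresponding characters, and
# unconvertible entities are dropped when remove_illegal is True.
def _example_replace_html_entities():
    assert _replace_html_entities("5 &gt; 3 &amp; 2 &lt; 4") == "5 > 3 & 2 < 4"
    assert _replace_html_entities("&#8217;") == "\u2019"   # decimal reference
    assert _replace_html_entities("&#x2019;") == "\u2019"  # hexadecimal reference
    assert _replace_html_entities("&bogus;") == ""         # removed as illegal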
######################################################################
class TweetTokenizer:
r"""
Examples::
>>> # Tokenizer for tweets.
>>> from nltk.tokenize import TweetTokenizer
>>> tknzr = TweetTokenizer()
>>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
>>> tknzr.tokenize(s0)
['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
>>> # Examples using `strip_handles` and `reduce_len parameters`:
>>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
>>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!'
>>> tknzr.tokenize(s1)
[':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
"""
def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
self.preserve_case = preserve_case
self.reduce_len = reduce_len
self.strip_handles = strip_handles
def tokenize(self, text):
"""
Args:
text: str
Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
`preserve_case=False`
"""
# Fix HTML character entities:
text = _replace_html_entities(text)
# Remove username handles
if self.strip_handles:
text = remove_handles(text)
# Normalize word lengthening
if self.reduce_len:
text = reduce_lengthening(text)
# Shorten problematic sequences of characters
safe_text = HANG_RE.sub(r"\1\1\1", text)
# Tokenize:
words = WORD_RE.findall(safe_text)
# Possibly alter the case, but avoid changing emoticons like :D into :d:
if not self.preserve_case:
words = list(map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words))
return words
######################################################################
# Normalization Functions
######################################################################
def reduce_lengthening(text):
"""
Replace repeated character sequences of length 3 or greater with sequences of length 3.
"""
pattern = regex.compile(r"(.)\1{2,}")
return pattern.sub(r"\1\1\1", text)
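# Editor's illustrative sketch (not part of the original file).
def _example_reduce_lengthening():
    assert reduce_lengthening("waaaaayyyy") == "waaayyy"  # runs capped at 3
    assert reduce_lengthening("cool") == "cool"           # runs of 2 untouched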
def remove_handles(text):
"""
Remove Twitter username handles from text.
"""
pattern = regex.compile(
r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
)
# Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
return pattern.sub(" ", text)
######################################################################
# Tokenization Function
######################################################################
def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
"""
Convenience function for wrapping the tokenizer.
"""
return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
text
)
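# Editor's illustrative sketch (not part of the original file), mirroring the
# TweetTokenizer docstring example through the convenience wrapper above.
def _example_casual_tokenize():
    s = "@remy: This is waaaaayyyy too much for you!!!!!!"
    assert casual_tokenize(s, reduce_len=True, strip_handles=True) == [
        ":", "This", "is", "waaayyy", "too", "much", "for", "you", "!", "!", "!"
    ]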
###############################################################################
|
AdaMix/src/transformers/models/bertweet/tokenization_bertweet.py/0
|
{
"file_path": "AdaMix/src/transformers/models/bertweet/tokenization_bertweet.py",
"repo_id": "AdaMix",
"token_count": 12344
}
| 54 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
"""
import copy
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from ...activations import gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_distilbert import DistilBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"
_TOKENIZER_FOR_DOC = "DistilBertTokenizer"
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"distilbert-base-uncased",
"distilbert-base-uncased-distilled-squad",
"distilbert-base-cased",
"distilbert-base-cased-distilled-squad",
"distilbert-base-german-cased",
"distilbert-base-multilingual-cased",
"distilbert-base-uncased-finetuned-sst-2-english",
# See all DistilBERT models at https://huggingface.co/models?filter=distilbert
]
# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out.requires_grad = False
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
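# Editor's illustrative sketch (not part of the original file): the table is
# written in place and frozen; even columns hold sines, odd columns cosines.
def _example_sinusoidal_embeddings():
    weight = torch.empty(8, 4)
    create_sinusoidal_embeddings(n_pos=8, dim=4, out=weight)
    assert not weight.requires_grad
    assert torch.allclose(weight[0, 0::2], torch.zeros(2))  # sin(0) == 0
    assert torch.allclose(weight[0, 1::2], torch.ones(2))   # cos(0) == 1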
class Embeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
if config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
)
self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
def forward(self, input_ids):
"""
Parameters:
input_ids: torch.tensor(bs, max_seq_length) The token ids to embed.
Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
embeddings)
"""
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
return embeddings
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = nn.Dropout(p=config.attention_dropout)
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
# Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, query, key, value, mask, head_mask=None, output_attentions=False):
"""
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights
            context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = self.dim // self.n_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
""" separate heads """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" group heads """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length)
weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)
weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if output_attentions:
return (context, weights)
else:
return (context,)
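# Editor's illustrative sketch (not part of the original file): a shape
# walkthrough with a hypothetical minimal config standing in for
# DistilBertConfig (same attribute names, toy sizes).
def _example_self_attention_shapes():
    class _Cfg:
        n_heads, dim, attention_dropout = 2, 8, 0.0

    attn = MultiHeadSelfAttention(_Cfg())
    x = torch.rand(3, 5, 8)                    # (bs, seq_length, dim)
    mask = torch.ones(3, 5, dtype=torch.long)  # no padding
    (context,) = attn(query=x, key=x, value=x, mask=mask)
    assert context.shape == (3, 5, 8)          # (bs, seq_length, dim)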
class FFN(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(p=config.dropout)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format(
config.activation
)
self.activation = gelu if config.activation == "gelu" else nn.ReLU()
def forward(self, input):
return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
def ff_chunk(self, input):
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x)
return x
class TransformerBlock(nn.Module):
def __init__(self, config):
super().__init__()
assert config.dim % config.n_heads == 0
self.attention = MultiHeadSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(self, x, attn_mask=None, head_mask=None, output_attentions=False):
"""
Parameters:
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Returns:
            sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights
            ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
"""
# Self-Attention
sa_output = self.attention(
query=x,
key=x,
value=x,
mask=attn_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
output = (ffn_output,)
if output_attentions:
output = (sa_weights,) + output
return output
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.n_layers = config.n_layers
layer = TransformerBlock(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)])
def forward(
self, x, attn_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=None
): # docstyle-ignore
"""
Parameters:
x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
Returns:
            hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer
            all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
                Tuple of length n_layers with the hidden states from each layer. Optional: only if
                output_hidden_states=True
            all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
                Tuple of length n_layers with the attention weights from each layer. Optional: only if
                output_attentions=True
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_state = x
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
layer_outputs = layer_module(
x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[i], output_attentions=output_attentions
)
hidden_state = layer_outputs[-1]
if output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
)
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class DistilBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DistilBertConfig
load_tf_weights = None
base_model_prefix = "distilbert"
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
DISTILBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
general usage and behavior.
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.DistilBertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = Embeddings(config) # Embeddings
self.transformer = Transformer(config) # Encoder
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.transformer.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)
return self.transformer(
x=inputs_embeds,
attn_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
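# Editor's hedged usage sketch (not part of the original file): needs network
# access to download the distilbert-base-uncased checkpoint.
def _example_distilbert_model():
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertModel.from_pretrained("distilbert-base-uncased")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state  # (1, seq_length, dim)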
@add_start_docstrings(
"""DistilBert Model with a `masked language modeling` head on top. """,
DISTILBERT_START_DOCSTRING,
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.vocab_transform = nn.Linear(config.dim, config.dim)
self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
self.init_weights()
self.mlm_loss_fct = nn.CrossEntropyLoss()
def get_output_embeddings(self):
return self.vocab_projector
def set_output_embeddings(self, new_embeddings):
self.vocab_projector = new_embeddings
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
dlbrt_output = self.distilbert(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
prediction_logits = gelu(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
mlm_loss = None
if labels is not None:
mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (prediction_logits,) + dlbrt_output[1:]
return ((mlm_loss,) + output) if mlm_loss is not None else output
return MaskedLMOutput(
loss=mlm_loss,
logits=prediction_logits,
hidden_states=dlbrt_output.hidden_states,
attentions=dlbrt_output.attentions,
)
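# Editor's hedged usage sketch (not part of the original file): fills in the
# token behind [MASK] with the highest-scoring vocabulary entry.
def _example_masked_lm():
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    logits = model(**inputs).logits  # (1, seq_length, vocab_size)
    mask_index = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero().item()
    return tokenizer.decode([logits[0, mask_index].argmax().item()])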
@add_start_docstrings(
"""
DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.init_weights()
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
distilbert_output = self.distilbert(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output) # (bs, dim)
logits = self.classifier(pooled_output) # (bs, num_labels)
loss = None
if labels is not None:
if self.num_labels == 1:
loss_fct = nn.MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + distilbert_output[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
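# Editor's hedged usage sketch (not part of the original file): with integer
# labels and num_labels > 1 the head computes a cross-entropy loss over the
# pooled first-token representation.
def _example_sequence_classification():
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    outputs = model(**inputs, labels=torch.tensor([1]))
    return outputs.loss, outputs.logits  # scalar loss, (1, 2) logits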
@add_start_docstrings(
"""
DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DISTILBERT_START_DOCSTRING,
)
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.qa_outputs = nn.Linear(config.dim, config.num_labels)
assert config.num_labels == 2
self.dropout = nn.Dropout(config.qa_dropout)
self.init_weights()
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
distilbert_output = self.distilbert(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1) # (bs, max_query_len)
end_logits = end_logits.squeeze(-1) # (bs, max_query_len)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension; squeeze it away if present
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + distilbert_output[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=distilbert_output.hidden_states,
attentions=distilbert_output.attentions,
)
@add_start_docstrings(
"""
DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.distilbert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
DISTILBERT_START_DOCSTRING,
)
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, 1)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.init_weights()
@add_start_docstrings_to_model_forward(
DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
Returns:
Examples::
>>> from transformers import DistilBertTokenizer, DistilBertForMultipleChoice
>>> import torch
>>> tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
>>> model = DistilBertForMultipleChoice.from_pretrained('distilbert-base-cased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors='pt', padding=True)
>>> outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.distilbert(
input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
logits = self.classifier(pooled_output) # (bs * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
AdaMix/src/transformers/models/distilbert/modeling_distilbert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/distilbert/modeling_distilbert.py",
"repo_id": "AdaMix",
"token_count": 16846
}
| 55 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
r"""
:class:`~transformers.EncoderDecoderConfig` is the configuration class to store the configuration of a
:class:`~transformers.EncoderDecoderModel`. It is used to instantiate an Encoder Decoder model according to the
specified arguments, defining the encoder and decoder configs.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
kwargs (`optional`):
Dictionary of keyword arguments. Notably:
- **encoder** (:class:`~transformers.PretrainedConfig`, `optional`) -- An instance of a configuration
object that defines the encoder config.
- **decoder** (:class:`~transformers.PretrainedConfig`, `optional`) -- An instance of a configuration
object that defines the decoder config.
Examples::
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> # Initializing a BERT bert-base-uncased style configuration
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Bert2Bert model from the bert-base-uncased style configurations
>>> model = EncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained('my-model')
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained('my-model')
>>> model = EncoderDecoderModel.from_pretrained('my-model', config=encoder_decoder_config)
"""
model_type = "encoder-decoder"
is_composition = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
encoder_config = kwargs.pop("encoder")
encoder_model_type = encoder_config.pop("model_type")
decoder_config = kwargs.pop("decoder")
decoder_model_type = decoder_config.pop("model_type")
from ..auto.configuration_auto import AutoConfig
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(
cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
) -> PretrainedConfig:
r"""
Instantiate a :class:`~transformers.EncoderDecoderConfig` (or a derived class) from a pre-trained encoder model
configuration and decoder model configuration.
Returns:
:class:`EncoderDecoderConfig`: An instance of a configuration object
"""
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig`.
Returns:
            :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["encoder"] = self.encoder.to_dict()
output["decoder"] = self.decoder.to_dict()
output["model_type"] = self.__class__.model_type
return output
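# Editorial sketch (not part of the original module): round-trips the nested
# serialization implemented by `to_dict` above; run from an environment with
# `transformers` installed.
if __name__ == "__main__":
    from transformers import BertConfig

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    d = config.to_dict()
    # encoder/decoder are plain dicts that keep their own `model_type`, which is
    # exactly what __init__ pops back out when rebuilding the sub-configs.
    assert d["model_type"] == "encoder-decoder"
    assert d["encoder"]["model_type"] == "bert" and d["decoder"]["model_type"] == "bert"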
|
AdaMix/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py/0
|
{
"file_path": "AdaMix/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py",
"repo_id": "AdaMix",
"token_count": 1807
}
| 56 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for Funnel Transformer."""
from typing import List, Optional
from ...utils import logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
"funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
"funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt",
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json",
"funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json",
"funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json",
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(BertTokenizerFast):
r"""
Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's `tokenizers` library).
:class:`~transformers.FunnelTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs
end-to-end tokenization: punctuation splitting and wordpiece.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = FunnelTokenizer
cls_token_type_id: int = 2
def __init__(
self,
vocab_file,
tokenizer_file=None,
do_lower_case=True,
unk_token="<unk>",
sep_token="<sep>",
pad_token="<pad>",
cls_token="<cls>",
mask_token="<mask>",
bos_token="<s>",
eos_token="</s>",
clean_text=True,
tokenize_chinese_chars=True,
strip_accents=None,
wordpieces_prefix="##",
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
bos_token=bos_token,
eos_token=eos_token,
clean_text=clean_text,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
wordpieces_prefix=wordpieces_prefix,
**kwargs,
)
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
Transformer sequence pair mask has the following format:
::
2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
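# Editorial sketch (not part of the original module): reproduces the Funnel
# token-type layout from `create_token_type_ids_from_sequences` with toy ids,
# so no tokenizer download is needed. 102 stands in for the <sep> id.
if __name__ == "__main__":
    cls_token_type_id = 2  # Funnel gives the [CLS] position its own segment id
    ids_a, ids_b, sep = [5, 6], [7, 8, 9], [102]
    mask = [cls_token_type_id] + len(ids_a + sep) * [0] + len(ids_b + sep) * [1]
    print(mask)  # [2, 0, 0, 0, 1, 1, 1, 1]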
|
AdaMix/src/transformers/models/funnel/tokenization_funnel_fast.py/0
|
{
"file_path": "AdaMix/src/transformers/models/funnel/tokenization_funnel_fast.py",
"repo_id": "AdaMix",
"token_count": 2921
}
| 57 |
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for M2M100."""
import json
from contextlib import contextmanager
from pathlib import Path
from shutil import copyfile
from typing import Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"]
# fmt: on
class M2M100Tokenizer(PreTrainedTokenizer):
"""
Construct an M2M100 tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
spm_file (:obj:`str`):
Path to `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension)
that contains the vocabulary.
src_lang (:obj:`str`, `optional`):
A string representing the source language.
tgt_lang (:obj:`str`, `optional`):
A string representing the target language.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
Examples::
>>> from transformers import M2M100Tokenizer
        >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
>>> src_text = " UN Chief Says There Is No Military Solution in Syria"
>>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> model_inputs = tokenizer(src_text, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
... labels = tokenizer(tgt_text, return_tensors="pt").input_ids
>>> # model(**model_inputs, labels=labels) should work
"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__(
self,
vocab_file,
spm_file,
src_lang=None,
tgt_lang=None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
pad_token="<pad>",
unk_token="<unk>",
**kwargs,
):
super().__init__(
src_lang=src_lang,
tgt_lang=tgt_lang,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
**kwargs,
)
self.vocab_file = vocab_file
self.encoder = load_json(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
self.spm_file = spm_file
self.sp_model = load_spm(spm_file)
self.encoder_size = len(self.encoder)
self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in FAIRSEQ_LANGUAGE_CODES}
self.lang_token_to_id = {
self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(FAIRSEQ_LANGUAGE_CODES)
}
self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(FAIRSEQ_LANGUAGE_CODES)}
self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
self._additional_special_tokens = list(self.lang_token_to_id.keys())
self._src_lang = src_lang if src_lang is not None else "en"
self.tgt_lang = tgt_lang
self.cur_lang_id = self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
self.num_madeup_words = 8
@property
def vocab_size(self) -> int:
return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(token, self.encoder[self.unk_token])
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the decoder."""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by
        concatenating and adding special tokens. An M2M100 sequence has the following format, where ``X``
        represents the sequence:
        - ``input_ids`` (for encoder): ``[src_lang_code] X [eos]``
        - ``decoder_input_ids`` (for decoder): ``[tgt_lang_code] X [eos]``
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def get_vocab(self) -> Dict:
vocab = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
self.sp_model = load_spm(self.spm_file)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
save_dir = Path(save_directory)
assert save_dir.is_dir(), f"{save_directory} should be a directory"
vocab_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
spm_save_path = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder, vocab_save_path)
if not spm_save_path.exists():
copyfile(self.spm_file, spm_save_path)
return (str(vocab_save_path), str(spm_save_path))
def prepare_seq2seq_batch(
self,
src_texts: List[str],
src_lang: str = "en",
tgt_texts: Optional[List[str]] = None,
tgt_lang: str = "ro",
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
sequence-to-sequence models that need a slightly different processing for the labels.
"""
self.set_tgt_lang_special_tokens(self.tgt_lang)
yield
self.set_src_lang_special_tokens(self.src_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
lang_token = self.get_lang_token(src_lang)
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
lang_token = self.get_lang_token(tgt_lang)
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def get_lang_token(self, lang: str) -> str:
return self.lang_code_to_token[lang]
def get_lang_id(self, lang: str) -> int:
lang_token = self.get_lang_token(lang)
return self.lang_token_to_id[lang_token]
def load_spm(path: str) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor()
spm.Load(str(path))
return spm
def load_json(path: str) -> Union[Dict, List]:
with open(path, "r") as f:
return json.load(f)
def save_json(data, path: str) -> None:
with open(path, "w") as f:
json.dump(data, f, indent=2)
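# Editorial sketch (not part of the original module): the language-code
# bookkeeping in __init__ reserves one token per code, appended right after the
# SentencePiece vocabulary. `encoder_size` below is illustrative; the real
# value is len(self.encoder).
if __name__ == "__main__":
    encoder_size = 128_000
    lang_token_to_id = {
        f"__{code}__": encoder_size + i for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
    }
    print(lang_token_to_id["__en__"])
    # With src_lang="en", encoder inputs are built as [__en__] + token_ids + [</s>]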
|
AdaMix/src/transformers/models/m2m_100/tokenization_m2m_100.py/0
|
{
"file_path": "AdaMix/src/transformers/models/m2m_100/tokenization_m2m_100.py",
"repo_id": "AdaMix",
"token_count": 6314
}
| 58 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ProphetNet checkpoint."""
import argparse
import torch
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
"""
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
"""
if "xprophetnet" in prophetnet_checkpoint_path:
prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path, output_loading_info=True
)
else:
prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path, output_loading_info=True
)
special_keys = ["key_proj", "value_proj", "query_proj"]
mapping = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
attributes = key.split(".")
if attributes[0] == "lm_head":
model = prophet
old_model = prophet_old
else:
model = prophet.prophetnet
old_model = prophet_old.model
is_key_init = False
for attribute in attributes:
if attribute in mapping:
old_attribute = mapping[attribute]
if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
old_attribute = attribute
elif hasattr(old_model, attribute):
old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
model.weight = old_model.weight
logger.info(f"{attribute} is initialized.")
is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
model.bias = old_model.bias
logger.info(f"{attribute} is initialized")
is_key_init = True
break
elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
embed_dim = old_model.in_proj_weight.shape[0] // 3
param = getattr(model, attribute)
                assert (
                    param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape
                ), "Shapes have to match"
                assert (
                    param.bias.shape == old_model.in_proj_bias[:embed_dim].shape
                ), "Shapes have to match"
if attribute == "query_proj":
model.query_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
model.query_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[:embed_dim])
elif attribute == "key_proj":
model.key_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
model.key_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
elif attribute == "value_proj":
model.value_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
model.value_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
model.position_embeddings.weight = torch.nn.Parameter(old_model.embed_positions.weight[:512, :])
is_key_init = True
break
if attribute.isdigit():
model = model[int(attribute)]
old_model = old_model[int(old_attribute)]
else:
model = getattr(model, attribute)
if old_attribute == "":
old_model = old_model
else:
if not hasattr(old_model, old_attribute):
raise ValueError(f"{old_model} does not have {old_attribute}")
old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(f"{key} was not correctly initialized!")
print(f"Saving model to {pytorch_dump_folder_path}")
prophet.save_pretrained(pytorch_dump_folder_path)
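def _split_in_proj(in_proj_weight):
    """Editorial sketch (not used above): fairseq fuses q/k/v into a single
    `in_proj_weight` of shape (3 * embed_dim, embed_dim); the conversion loop
    slices it into the three separate projections, as reproduced here."""
    embed_dim = in_proj_weight.shape[0] // 3
    query = in_proj_weight[:embed_dim, :]
    key = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value = in_proj_weight[2 * embed_dim :, :]
    return query, key, value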
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
|
AdaMix/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py/0
|
{
"file_path": "AdaMix/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py",
"repo_id": "AdaMix",
"token_count": 3111
}
| 59 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RetriBERT model
"""
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from ...file_utils import add_start_docstrings
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..bert.modeling_bert import BertModel
from .configuration_retribert import RetriBertConfig
logger = logging.get_logger(__name__)
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"yjernite/retribert-base-uncased",
# See all RetriBert models at https://huggingface.co/models?filter=retribert
]
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class RetriBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RetriBertConfig
load_tf_weights = None
base_model_prefix = "retribert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
RETRIBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RetriBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
@add_start_docstrings(
"""Bert Based model to embed queries or document for document retrieval. """,
RETRIBERT_START_DOCSTRING,
)
class RetriBertModel(RetriBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.projection_dim = config.projection_dim
self.bert_query = BertModel(config)
self.bert_doc = None if config.share_encoders else BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
self.init_weights()
def embed_sentences_checkpointed(
self,
input_ids,
attention_mask,
sent_encoder,
checkpoint_batch_size=-1,
):
# reproduces BERT forward pass with checkpointing
if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
return sent_encoder(input_ids, attention_mask=attention_mask)[1]
else:
# prepare implicit variables
device = input_ids.device
input_shape = input_ids.size()
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
head_mask = [None] * sent_encoder.config.num_hidden_layers
extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
attention_mask, input_shape, device
)
# define function for checkpointing
def partial_encode(*inputs):
encoder_outputs = sent_encoder.encoder(
inputs[0],
attention_mask=inputs[1],
head_mask=head_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = sent_encoder.pooler(sequence_output)
return pooled_output
# run embedding layer on everything at once
embedding_output = sent_encoder.embeddings(
input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
)
# run encoding and pooling on one mini-batch at a time
pooled_output_list = []
for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
pooled_output_list.append(pooled_output)
return torch.cat(pooled_output_list, dim=0)
def embed_questions(
self,
input_ids,
attention_mask=None,
checkpoint_batch_size=-1,
):
q_reps = self.embed_sentences_checkpointed(
input_ids,
attention_mask,
self.bert_query,
checkpoint_batch_size,
)
return self.project_query(q_reps)
def embed_answers(
self,
input_ids,
attention_mask=None,
checkpoint_batch_size=-1,
):
a_reps = self.embed_sentences_checkpointed(
input_ids,
attention_mask,
self.bert_query if self.bert_doc is None else self.bert_doc,
checkpoint_batch_size,
)
return self.project_doc(a_reps)
def forward(
self, input_ids_query, attention_mask_query, input_ids_doc, attention_mask_doc, checkpoint_batch_size=-1
):
r"""
Args:
input_ids_query (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary for the queries in a batch.
Indices can be obtained using :class:`~transformers.RetriBertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask_query (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
input_ids_doc (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary for the documents in a batch.
attention_mask_doc (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on documents padding token indices.
            checkpoint_batch_size (:obj:`int`, `optional`, defaults to :obj:`-1`):
If greater than 0, uses gradient checkpointing to only compute sequence representation on
:obj:`checkpoint_batch_size` examples at a time on the GPU. All query representations are still
compared to all document representations in the batch.
Return:
:obj:`torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to
its corresponding document and each document to its corresponding query in the batch
"""
device = input_ids_query.device
q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
compare_scores = torch.mm(q_reps, a_reps.t())
loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
loss = (loss_qa + loss_aq) / 2
return loss
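# Editorial sketch (not part of the original module): the bidirectional
# in-batch contrastive loss computed in `forward`, reproduced with toy
# embeddings. A square batch (one matching document per query) is assumed,
# which is what the arange targets encode.
if __name__ == "__main__":
    import torch
    from torch import nn

    q_reps = torch.randn(4, 128)           # 4 query embeddings
    a_reps = torch.randn(4, 128)           # 4 matching document embeddings
    scores = torch.mm(q_reps, a_reps.t())  # (4, 4) similarity matrix
    targets = torch.arange(4)              # query i matches document i
    ce = nn.CrossEntropyLoss()
    loss = (ce(scores, targets) + ce(scores.t(), targets)) / 2
    print(loss.item())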
|
AdaMix/src/transformers/models/retribert/modeling_retribert.py/0
|
{
"file_path": "AdaMix/src/transformers/models/retribert/modeling_retribert.py",
"repo_id": "AdaMix",
"token_count": 3881
}
| 60 |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _BaseLazyModule, is_sentencepiece_available, is_torch_available
_import_structure = {
"configuration_speech_to_text": [
"SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Speech2TextConfig",
],
"feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
}
if is_sentencepiece_available():
_import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
_import_structure["processing_speech_to_text"] = ["Speech2TextProcessor"]
if is_torch_available():
_import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
if is_sentencepiece_available():
from .processing_speech_to_text import Speech2TextProcessor
from .tokenization_speech_to_text import Speech2TextTokenizer
if is_torch_available():
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
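# Editorial sketch (not part of the original module): a minimal stand-alone
# version of the lazy-import pattern used above, independent of
# `_BaseLazyModule`; for illustration only.
import importlib as _importlib
import types as _types

class _TinyLazyModule(_types.ModuleType):
    """Resolves attributes by importing the owning submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, objects in self._import_structure.items():
            if attr in objects:
                module = _importlib.import_module("." + module_name, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")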
|
AdaMix/src/transformers/models/speech_to_text/__init__.py/0
|
{
"file_path": "AdaMix/src/transformers/models/speech_to_text/__init__.py",
"repo_id": "AdaMix",
"token_count": 1001
}
| 61 |
# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" T5 tokenizer (backed by HuggingFace's `tokenizers` library). Based on `Unigram
<https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (:obj:`int`, `optional`, defaults to 100):
            Add a number of extra ids to the end of the vocabulary, for use as sentinels. These tokens are
            accessible as "<extra_id_{%d}>", where "{%d}" is a number between 0 and extra_ids-1. Extra tokens
            are indexed from the end of the vocabulary down to the beginning ("<extra_id_0>" is the last token
            in the vocabulary, as in T5 preprocessing; see `here
<https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = T5Tokenizer
prefix_tokens: List[int] = []
def __init__(
self,
vocab_file,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
additional_special_tokens=None,
**kwargs
):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id_" in x), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
"In this case the additional_special_tokens must include the extra_ids tokens"
)
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=extra_ids,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
self._extra_ids = extra_ids
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: ``X </s>``
- pair of sequences: ``A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
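# Editorial sketch (not part of the original module): the special-token formats
# built above, with toy ids and a stand-in eos id (the real id comes from the
# SentencePiece vocabulary).
if __name__ == "__main__":
    a, b, eos = [10, 11], [20, 21, 22], 1
    print(a + [eos])              # single sequence: ``X </s>``
    print(a + [eos] + b + [eos])  # pair: ``A </s> B </s>``
    # Sentinel tokens appended for extra_ids=3:
    print(["<extra_id_{}>".format(i) for i in range(3)])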
|
AdaMix/src/transformers/models/t5/tokenization_t5_fast.py/0
|
{
"file_path": "AdaMix/src/transformers/models/t5/tokenization_t5_fast.py",
"repo_id": "AdaMix",
"token_count": 3701
}
| 62 |
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Transformer XL configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a :class:`~transformers.TransfoXLModel` or a
:class:`~transformers.TFTransfoXLModel`. It is used to instantiate a Transformer-XL model according to the
specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a
similar configuration to that of the `Transformer XL <https://huggingface.co/transfo-xl-wt103>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 267735):
            Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.TransfoXLModel` or
:class:`~transformers.TFTransfoXLModel`.
cutoffs (:obj:`List[int]`, `optional`, defaults to :obj:`[20000, 40000, 200000]`):
Cutoffs for the adaptive softmax.
d_model (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the model's hidden states.
d_embed (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the embeddings
n_head (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
d_head (:obj:`int`, `optional`, defaults to 64):
Dimensionality of the model's heads.
d_inner (:obj:`int`, `optional`, defaults to 4096):
            Inner dimension in the feed-forward (FF) layers.
        div_val (:obj:`int`, `optional`, defaults to 4):
            Divisor value for the adaptive input and softmax.
pre_lnorm (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether or not to apply LayerNorm to the input instead of the output in the blocks.
n_layer (:obj:`int`, `optional`, defaults to 18):
Number of hidden layers in the Transformer encoder.
mem_len (:obj:`int`, `optional`, defaults to 1600):
            Length of the retained previous hidden states (the memory).
clamp_len (:obj:`int`, `optional`, defaults to 1000):
Use the same pos embeddings after clamp_len.
same_length (:obj:`boolean`, `optional`, defaults to :obj:`True`):
Whether or not to use the same attn length for all tokens
proj_share_all_but_first (:obj:`boolean`, `optional`, defaults to :obj:`True`):
            Whether to share all projection layers except the first one.
attn_type (:obj:`int`, `optional`, defaults to 0):
Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
sample_softmax (:obj:`int`, `optional`, defaults to -1):
Number of samples in the sampled softmax.
adaptive (:obj:`boolean`, `optional`, defaults to :obj:`True`):
Whether or not to use adaptive softmax.
dropout (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
dropatt (:obj:`float`, `optional`, defaults to 0):
The dropout ratio for the attention probabilities.
untie_r (:obj:`boolean`, `optional`, defaults to :obj:`True`):
            Whether or not to untie the relative position biases.
init (:obj:`str`, `optional`, defaults to :obj:`"normal"`):
Parameter initializer to use.
init_range (:obj:`float`, `optional`, defaults to 0.01):
Parameters initialized by U(-init_range, init_range).
proj_init_std (:obj:`float`, `optional`, defaults to 0.01):
            Parameters initialized by N(0, proj_init_std).
        init_std (:obj:`float`, `optional`, defaults to 0.02):
            Parameters initialized by N(0, init_std).
layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
The epsilon to use in the layer normalization layers
Examples::
>>> from transformers import TransfoXLConfig, TransfoXLModel
>>> # Initializing a Transformer XL configuration
>>> configuration = TransfoXLConfig()
>>> # Initializing a model from the configuration
>>> model = TransfoXLModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "transfo-xl"
keys_to_ignore_at_inference = ["mems"]
def __init__(
self,
vocab_size=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02,
layer_norm_epsilon=1e-5,
eos_token_id=0,
**kwargs
):
super().__init__(eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.cutoffs = []
self.cutoffs.extend(cutoffs)
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
self.layer_norm_epsilon = layer_norm_epsilon
@property
def max_position_embeddings(self):
# Message copied from Transformer-XL documentation
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@property
def n_token(self): # Backward compatibility
return self.vocab_size
@n_token.setter
def n_token(self, value): # Backward compatibility
self.vocab_size = value
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
|
AdaMix/src/transformers/models/transfo_xl/configuration_transfo_xl.py/0
|
{
"file_path": "AdaMix/src/transformers/models/transfo_xl/configuration_transfo_xl.py",
"repo_id": "AdaMix",
"token_count": 3312
}
| 63 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Speech processor class for Wav2Vec2
"""
from contextlib import contextmanager
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor:
r"""
Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single
processor.
:class:`~transformers.Wav2Vec2Processor` offers all the functionalities of
:class:`~transformers.Wav2Vec2FeatureExtractor` and :class:`~transformers.Wav2Vec2CTCTokenizer`. See the docstring
of :meth:`~transformers.Wav2Vec2Processor.__call__` and :meth:`~transformers.Wav2Vec2Processor.decode` for more
information.
Args:
feature_extractor (:obj:`Wav2Vec2FeatureExtractor`):
An instance of :class:`~transformers.Wav2Vec2FeatureExtractor`. The feature extractor is a required input.
tokenizer (:obj:`Wav2Vec2CTCTokenizer`):
An instance of :class:`~transformers.Wav2Vec2CTCTokenizer`. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
if not isinstance(feature_extractor, Wav2Vec2FeatureExtractor):
raise ValueError(
f"`feature_extractor` has to be of type {Wav2Vec2FeatureExtractor.__class__}, but is {type(feature_extractor)}"
)
if not isinstance(tokenizer, Wav2Vec2CTCTokenizer):
raise ValueError(
f"`tokenizer` has to be of type {Wav2Vec2CTCTokenizer.__class__}, but is {type(tokenizer)}"
)
self.feature_extractor = feature_extractor
self.tokenizer = tokenizer
self.current_processor = self.feature_extractor
def save_pretrained(self, save_directory):
"""
Save a Wav2Vec2 feature_extractor object and Wav2Vec2 tokenizer object to the directory ``save_directory``, so
that it can be re-loaded using the :func:`~transformers.Wav2Vec2Processor.from_pretrained` class method.
.. note::
This class method is simply calling
:meth:`~transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained` and
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizer.save_pretrained`. Please refer to the
docstrings of the methods above for more information.
Args:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will
be created if it does not exist).
"""
self.feature_extractor.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r"""
Instantiate a :class:`~transformers.Wav2Vec2Processor` from a pretrained Wav2Vec2 processor.
.. note::
This class method is simply calling Wav2Vec2FeatureExtractor's
:meth:`~transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained` and
Wav2Vec2CTCTokenizer's :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizer.from_pretrained`.
Please refer to the docstrings of the methods above for more information.
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
This can be either:
- a string, the `model id` of a pretrained feature_extractor hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a feature extractor file saved using the
:meth:`~transformers.SequenceFeatureExtractor.save_pretrained` method, e.g.,
``./my_model_directory/``.
- a path or url to a saved feature extractor JSON `file`, e.g.,
``./my_model_directory/feature_extraction_config.json``.
**kwargs
Additional keyword arguments passed along to both :class:`~transformers.SequenceFeatureExtractor` and
:class:`~transformers.PreTrainedTokenizer`
"""
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
def __call__(self, *args, **kwargs):
"""
When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's
:meth:`~transformers.Wav2Vec2FeatureExtractor.__call__` and returns its output. If used in the context
:meth:`~transformers.Wav2Vec2Processor.as_target_processor` this method forwards all its arguments to
        Wav2Vec2CTCTokenizer's :meth:`~transformers.Wav2Vec2CTCTokenizer.__call__`. Please refer to the docstring of
the above two methods for more information.
"""
return self.current_processor(*args, **kwargs)
def pad(self, *args, **kwargs):
"""
When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's
:meth:`~transformers.Wav2Vec2FeatureExtractor.pad` and returns its output. If used in the context
:meth:`~transformers.Wav2Vec2Processor.as_target_processor` this method forwards all its arguments to
Wav2Vec2CTCTokenizer's :meth:`~transformers.Wav2Vec2CTCTokenizer.pad`. Please refer to the docstring of the
above two methods for more information.
"""
return self.current_processor.pad(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Wav2Vec2CTCTokenizer's
:meth:`~transformers.PreTrainedTokenizer.batch_decode`. Please refer to the docstring of this method for more
information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to Wav2Vec2CTCTokenizer's
:meth:`~transformers.PreTrainedTokenizer.decode`. Please refer to the docstring of this method for more
information.
"""
return self.tokenizer.decode(*args, **kwargs)
@contextmanager
def as_target_processor(self):
"""
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
Wav2Vec2.
"""
self.current_processor = self.tokenizer
yield
self.current_processor = self.feature_extractor
|
AdaMix/src/transformers/models/wav2vec2/processing_wav2vec2.py/0
|
{
"file_path": "AdaMix/src/transformers/models/wav2vec2/processing_wav2vec2.py",
"repo_id": "AdaMix",
"token_count": 2952
}
| 64 |
from typing import TYPE_CHECKING, Optional, Union
from ..modelcard import ModelCard
from ..tokenization_utils import PreTrainedTokenizer
from .base import ArgumentHandler, Pipeline
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from :func:`~transformers.pipeline` using the task
identifier: :obj:`"feature-extraction"`.
    All models may be used for this pipeline. See a list of all models, including community-contributed models, on
    `huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU; a positive integer will run
            the model on the associated CUDA device id.
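    Example (a minimal sketch; the model id is an illustrative assumption)::

        from transformers import pipeline

        extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
        features = extractor("We are very happy.")  # one vector of floats per token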
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of :obj:`float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs).tolist()
|
AdaMix/src/transformers/pipelines/feature_extraction.py/0
|
{
"file_path": "AdaMix/src/transformers/pipelines/feature_extraction.py",
"repo_id": "AdaMix",
"token_count": 1402
}
| 65 |
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or fine-tune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap import auto_wrap
else:
FullyShardedDDP = None
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to
            the maximum length when batching inputs, and it will be saved along with the model to make it easier to
            rerun an interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
            able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
            layers, dropout probabilities, etc.).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
            The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping strings to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
      ``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
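    Example (a minimal sketch; ``model`` and ``train_dataset`` are illustrative assumptions)::

        args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1)
        trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
        trainer.train()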
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
        # Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a model that is much bigger than a single GPU
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
        # Internal variable to keep track of total_flos as a tensor (for distributed + TPU); it will be sent to the
        # state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
        Add a callback to the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.
        If the callback is not found, returns :obj:`None` (and no error is raised).
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will pop the first member of that class found in the list of callbacks.
        Returns:
            :class:`~transformers.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
            # Labels may be named label or label_ids; the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
model_input_name=model_input_name,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset)
elif self.args.parallel_mode == ParallelMode.TPU and not self.args.dataloader_drop_last:
                # Use a loop for TPUs when drop_last is False so that all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
)
else:
return DistributedSampler(
self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(self.args.seed)
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
worker_init_fn=seed_worker,
generator=g
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer(self) -> torch.optim.Optimizer:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
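        # Illustrative split (a sketch, not exhaustive): LayerNorm weights and any parameter whose
        # name contains "bias" end up in the no-decay group below; everything else uses
        # args.weight_decay.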
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
        if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
return OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
return optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
def create_scheduler(self, optimizer: torch.optim.Optimizer, num_training_steps: int) -> torch.optim.lr_scheduler.LambdaLR:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
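        # Illustrative arithmetic: with warmup_steps=0, warmup_ratio=0.1 and num_training_steps=1000,
        # the expression above yields math.ceil(1000 * 0.1) = 100 warmup steps.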
return get_scheduler(
self.args.lr_scheduler_type,
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
self.optimizer = self.create_optimizer()
if self.lr_scheduler is None:
self.lr_scheduler = self.create_scheduler(
optimizer=self.optimizer,
num_training_steps=num_training_steps
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyway.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
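        Example (an illustrative sketch; resumes from the last checkpoint found in ``args.output_dir``)::

            trainer.train(resume_from_checkpoint=True)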
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
self._load_state_dict_in_model(state_dict)
del state_dict
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keeping track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
error_if_nonfinite=False
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
            # to restore normal behavior outside of `train`, replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
            # There is no reliable way to check whether a saved deepspeed checkpoint exists: `load_checkpoint`
            # simply returns None when it fails to find one, so this acts as a check-and-load in one call.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize for greater or lower objective values. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
hp_name (:obj:`Callable[["optuna.Trial"], str]`, `optional`):
A function that defines the trial/run name. Will default to :obj:`None`.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
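# Usage sketch (hypothetical, not part of this file): a small optuna search. `model_init`,
# `training_args`, `train_ds` and `eval_ds` are assumed to be defined by the caller.
#
#     def hp_space(trial):
#         return {
#             "learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True),
#             "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
#         }
#
#     trainer = Trainer(model_init=model_init, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds)
#     best_run = trainer.hyperparameter_search(hp_space=hp_space, n_trials=10, direction="minimize")
#     print(best_run.run_id, best_run.objective, best_run.hyperparameters)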
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
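# Subclassing sketch (hypothetical): `log` is the hook to intercept metrics, e.g. to mirror them
# to a custom sink while keeping the default bookkeeping.
#
#     class PrintingTrainer(Trainer):
#         def log(self, logs):
#             super().log(logs)
#             print(f"step {self.state.global_step}: {logs}")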
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
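# Worked example (hypothetical numbers): with gradient_accumulation_steps=4 the loss returned by
# compute_loss is divided by 4, `backward()` accumulates gradients over 4 successive calls, and the
# optimizer steps once per 4 batches, giving an effective batch size of 4x the per-device batch size.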
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
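# Subclassing sketch (hypothetical): overriding compute_loss for a weighted cross-entropy loss. The
# class weights and the `labels`/`logits` layout are assumptions about the caller's model.
#
#     import torch.nn.functional as F
#
#     class WeightedLossTrainer(Trainer):
#         def compute_loss(self, model, inputs, return_outputs=False):
#             labels = inputs.pop("labels")
#             outputs = model(**inputs)
#             logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
#             weight = torch.tensor([1.0, 2.0], device=logits.device)  # hypothetical class weights
#             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1), weight=weight)
#             return (loss, outputs) if return_outputs else loss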
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local main process (e.g., the main process on one machine when
training in a distributed fashion on several machines).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
def save_partial_model(self, output_dir: Optional[str] = None, keys=None, weights_name="expert_soup"):
"""
Will save only the parameters whose names contain one of :obj:`keys` (the full state dict when
:obj:`keys` is :obj:`None`), so you can reload them later with :obj:`load_state_dict()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
# The two original branches (with and without ZeRO sharded DDP) had identical bodies: in both
# cases the state dict is gathered and written on the main process only.
state_dict = self.model.state_dict()
if keys is not None:
# Keep only the parameters whose names contain one of the requested keys.
state_dict = {k: v for k, v in state_dict.items() if any(key in k for key in keys)}
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(state_dict, os.path.join(output_dir, "pytorch_model_" + weights_name + ".bin"))
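# Usage sketch (hypothetical key names and paths): saving only the AdaMix expert parameters and
# reloading them into a model.
#
#     trainer.save_partial_model("out/experts", keys=["expert_soup"], weights_name="expert_soup")
#     partial = torch.load("out/experts/pytorch_model_expert_soup.bin", map_location="cpu")
#     model.load_state_dict(partial, strict=False)  # strict=False: only a subset of weights is present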
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
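# Behaviour sketch (hypothetical values): with save_total_limit=2 and checkpoint-100, checkpoint-200
# and checkpoint-300 on disk, _sorted_checkpoints orders them by step (or mtime), swaps the best
# checkpoint to the end so it is never deleted, and _rotate_checkpoints removes checkpoint-100.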
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
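# Usage sketch (hypothetical `compute_metrics`): metric keys come back prefixed with `metric_key_prefix`.
#
#     from sklearn.metrics import accuracy_score
#
#     def compute_metrics(eval_pred):
#         preds = eval_pred.predictions.argmax(-1)
#         return {"accuracy": accuracy_score(eval_pred.label_ids, preds)}
#
#     trainer = Trainer(model=model, args=training_args, eval_dataset=eval_ds, compute_metrics=compute_metrics)
#     metrics = trainer.evaluate()  # e.g. {"eval_loss": ..., "eval_accuracy": ..., "eval_runtime": ...}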
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is a :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
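# Usage sketch: `predict` returns a PredictionOutput namedtuple; metrics are only populated when the
# dataset carries labels and `compute_metrics` is set.
#
#     output = trainer.predict(test_ds, metric_key_prefix="test")
#     preds = output.predictions.argmax(-1)
#     print(output.metrics)  # e.g. {"test_loss": ..., "test_runtime": ...}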
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# No harm done, but flag to the user that the deepspeed config is ignored for evaluation.
# We only flag this when --do_train wasn't passed, since only then is the argument redundant.
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# If full fp16 is wanted on eval and this ``evaluate`` or ``predict`` isn't called while
# ``train`` is running, cast the model to half precision first and then put it on the device.
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval samples can be greater than num_examples in distributed settings (when we
# pass a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather the values of `tensors` (a tensor or a list/tuple of nested tensors) and convert them to numpy
arrays before concatenating them.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
AdaMix/src/transformers/trainer.py
{
"modelname": "Template",
"uppercase_modelname": "TEMPLATE",
"lowercase_modelname": "template",
"camelcase_modelname": "Template",
"authors": "The HuggingFace Team",
"checkpoint_identifier": "brand-new-bert-base-cased",
"tokenizer_type": "Based on BERT",
"generate_tensorflow_and_pytorch": "PyTorch & TensorFlow",
"is_encoder_decoder_model": "False"
}
AdaMix/templates/adding_a_new_model/tests/encoder-bert-tokenizer.json
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import BartForConditionalGeneration, BartTokenizer, top_k_top_p_filtering
from transformers.generation_beam_search import BeamSearchScorer
from transformers.generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from transformers.generation_stopping_criteria import MaxLengthCriteria, StoppingCriteriaList
from transformers.generation_utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
)
class GenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
input_name = "input_ids"
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
# cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@staticmethod
def _get_logits_processor_and_kwargs(
input_length,
eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
max_length=None,
diversity_penalty=None,
):
process_kwargs = {
"min_length": input_length + 1,
"bad_words_ids": [[1, 0]],
"no_repeat_ngram_size": 2,
"repetition_penalty": 1.2,
}
logits_processor = LogitsProcessorList(
(
[
HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
]
if diversity_penalty is not None
else []
)
+ (
[
MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
]
if eos_token_id is not None
else []
)
+ (
[
ForcedBOSTokenLogitsProcessor(forced_bos_token_id),
]
if forced_bos_token_id is not None
else []
)
+ (
[ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)]
if forced_eos_token_id is not None
else []
)
+ [
NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id),
NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]),
RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]),
]
)
return process_kwargs, logits_processor
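# Standalone sketch (hypothetical ids and vocab size) of how such a LogitsProcessorList rescales
# next-token scores outside of `generate()`:
#
#     input_ids = torch.tensor([[2, 3, 4]])  # one partial sequence of length 3
#     scores = torch.randn(1, 10)  # vocab size 10
#     processors = LogitsProcessorList(
#         [
#             MinLengthLogitsProcessor(5, eos_token_id=0),  # eos (id 0) forced to -inf while len < 5
#             RepetitionPenaltyLogitsProcessor(penalty=1.2),  # already-generated ids are penalized
#         ]
#     )
#     scores = processors(input_ids, scores)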
@staticmethod
def _get_warper_and_kwargs(num_beams):
warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7}
logits_warper = LogitsProcessorList(
[
TemperatureLogitsWarper(warp_kwargs["temperature"]),
TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
]
)
return warp_kwargs, logits_warper
@staticmethod
def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
"num_beam_groups": 2, # one beam per group
"diversity_penalty": 2.0,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=beam_kwargs["num_beam_groups"],
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _greedy_generate(
self,
model,
input_ids,
attention_mask,
max_length,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
eos_token_id=model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
kwargs = {}
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
num_beams=1,
max_length=max_length,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**logits_process_kwargs,
)
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
with torch.no_grad():
output_greedy = model.greedy_search(
input_ids,
max_length=max_length,
attention_mask=attention_mask,
logits_processor=logits_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_greedy, output_generate
def _sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
logits_processor,
logits_warper,
logits_warper_kwargs,
process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
do_sample=True,
num_beams=1,
max_length=max_length,
num_return_sequences=num_return_sequences,
attention_mask=attention_mask,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**logits_warper_kwargs,
**process_kwargs,
)
torch.manual_seed(0)
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0)
input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0)
with torch.no_grad():
output_sample = model.sample(
input_ids_clone,
attention_mask=attention_mask_clone,
max_length=max_length,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_sample, output_generate
def _beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**beam_kwargs,
**logits_process_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_beam_search = model.beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_search
def _beam_sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
beam_scorer,
beam_kwargs,
logits_warper,
logits_warper_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=True,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**beam_kwargs,
**logits_warper_kwargs,
)
# beam_sample does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams * num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
else:
attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0)
torch.manual_seed(0)
with torch.no_grad():
output_beam_sample = model.beam_sample(
input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0),
beam_scorer,
max_length=max_length,
attention_mask=attention_mask,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_sample
def _group_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.group_beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def test_greedy_generate(self):
# check `generate()` and `greedy_search()` are equal
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# test old generation output for backwards compatibility
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length
)
self.assertListEqual(output_greedy.tolist(), output_generate.tolist())
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config)
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config, use_cache=True)
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
# check `generate()` and `sample()` are equal
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=1,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
# check `generate()` and `sample()` yield equal results for `num_return_sequences`
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=3,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=2,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_sample, SampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())
for output in (output_sample, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=2)
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
# check `generate()` and `beam_search()` are equal
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
# check `generate()` and `beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
def test_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_beam, output_generate = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())
for output in (output_beam, output_generate):
self._check_outputs(
output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
)
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
model = model_class(config).to(torch_device).eval()
# check `generate()` and `beam_sample()` are equal
# change `num_return_sequences = 2` but not for `beam_scorer`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_generate, output_beam_sample = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())
def test_beam_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_beam_sample, output_generate = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_sample, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_without_input_ids(self):
config, _, _, max_length = self._get_input_ids_and_config()
# if no bos token id => cannot generate from None
if config.bos_token_id is None:
return
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False,
max_length=max_length,
)
self.assertIsNotNone(output_ids_generate)
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
# check `generate()` and `group_beam_search()` are equal
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
# check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
def test_group_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
num_return_sequences = 1
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(
output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_group_beam_search, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
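        # encoder-decoder sequences include the decoder start token (hence -1), while
        # decoder-only sequences also contain the prompt (hence -seq_length)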
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
if config.is_encoder_decoder:
# encoder
self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
            # with use_cache, the first step is identical to the no-cache case, so skip it here
attentions = output.attentions if not use_cache else output.attentions[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_attentions_for_generate(
num_sequences_in_output,
attentions=attentions,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
if config.is_encoder_decoder:
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
            # with use_cache, the first step is identical to the no-cache case, so skip it here
hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_hidden_states_for_generate(
num_sequences_in_output,
hidden_states,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _check_scores(self, batch_size, scores, length, config):
expected_shape = (batch_size, config.vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(attentions):
tgt_len = min_length + idx if not use_cache else 1
src_len = min_length + idx
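            # with cache, only the newly generated token queries the past (tgt_len == 1),
            # while keys/values still span the full prefix of length src_len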
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(hidden_states):
seq_len = min_length + idx if not use_cache else 1
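            # with cache, each step only emits hidden states for the single new token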
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, seq_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276,
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 4 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958,
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 4 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non filtered idx as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
7.3858,
9.6770,
], # expected non filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
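        # top_k=10 first keeps the 10 largest logits per row; top_p=0.6 then keeps the
        # smallest set of top tokens whose cumulative probability exceeds 0.6, but never
        # fewer than min_tokens_to_keep=4 tokens; all filtered positions are set to -inf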
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0
)
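        # num_beams=4 with num_beam_groups=4 gives one beam per group; diversity_penalty=2.0
        # pushes the groups towards different summaries, two of which are returned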
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the first child for both. The couple announced the pregnancy in January. The name Silas is the middle name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
def test_max_length_backward_compat_greedy(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_sample(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
bart_model.sample(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 2
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=torch_device,
)
_ = bart_model.beam_search(
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs
)
def test_max_length_backward_compat_group_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
bart_model.group_beam_search(
input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs
)
def test_max_length_warning_if_different(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
# Greedy
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
stopping_criteria=stopping_criteria,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Sample
with self.assertWarns(UserWarning):
bart_model.sample(
input_ids,
max_length=max_length,
stopping_criteria=stopping_criteria,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Beam
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
max_length=max_length,
beam_scorer=beam_scorer,
**model_kwargs,
)
# Grouped beam search
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids,
diverse_beam_scorer,
stopping_criteria=stopping_criteria,
num_beams=num_beams,
max_length=max_length,
**model_kwargs,
)
# ===== end of AdaMix/tests/test_generation_utils.py =====
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvBERT model. """
import unittest
from tests.test_modeling_common import floats_tensor
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
ConvBertConfig,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertModel,
)
from transformers.models.convbert.modeling_convbert import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class ConvBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ConvBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ConvBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ConvBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = ConvBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ConvBertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ConvBertModel,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = ConvBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
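        # (chunk_length / num_hashes handling is Reformer-specific and does not apply to ConvBERT)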
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
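            # ConvBERT splits heads between self-attention and span-based dynamic convolution
            # (head_ratio=2), so only num_attention_heads / 2 heads appear in the attention maps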
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Question Answering model returns start_logits and end_logits
if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
@require_torch
class ConvBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# ===== end of AdaMix/tests/test_modeling_convbert.py =====
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
LayoutLMConfig,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
class LayoutLMModelTester:
"""You can also import this e.g from .test_modeling_layoutlm import LayoutLMModelTester """
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
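        # (LayoutLM expects boxes as (x0, y0, x1, y1) with x1 >= x0 and y1 >= y0, so
        # randomly drawn coordinates are swapped whenever they come out reversed)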
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = LayoutLMConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LayoutLMModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LayoutLMForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LayoutLMForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LayoutLMForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
LayoutLMModel,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
)
if is_torch_available()
        else ()
)
def setUp(self):
self.model_tester = LayoutLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) # noqa: E231
    # these are token-level labels (one label per token in each sequence)
labels = torch.tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]],device=torch_device) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_torch
class LayoutLMModelIntegrationTest(unittest.TestCase):
@slow
def test_forward_pass_no_head(self):
model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
# test the sequence output on [0, :3, :3]
expected_slice = torch.tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
# test the pooled output on [1, :3]
expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device)
self.assertTrue(torch.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
@slow
def test_forward_pass_sequence_classification(self):
# initialize model with randomly initialized sequence classification head
model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=torch.tensor([1, 1], device=torch_device),
)
# test whether we get a loss as a scalar
loss = outputs.loss
expected_shape = torch.Size([])
self.assertEqual(loss.shape, expected_shape)
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 2))
self.assertEqual(logits.shape, expected_shape)
@slow
def test_forward_pass_token_classification(self):
# initialize model with randomly initialized token classification head
model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
        # the expected loss is around 2.65, but it currently varies between runs
        # (by roughly 0.1-0.3), so the exact check is disabled:
        # expected_loss = torch.tensor(2.65, device=torch_device)
        # self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=0.1))
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 25, 13))
self.assertEqual(logits.shape, expected_shape)
# ===== end of AdaMix/tests/test_modeling_layoutlm.py =====
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Speech2Text model. """
import copy
import inspect
import os
import tempfile
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import (
is_torch_available,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torchaudio,
slow,
torch_device,
)
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
Speech2TextConfig,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextProcessor,
)
from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder
def prepare_speech_to_text_inputs_dict(
config,
input_features,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
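    # when masks are not provided, derive them: non-zero feature values count as real
    # input and decoder positions equal to pad_token_id are masked out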
if attention_mask is None:
attention_mask = input_features.ne(0)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
return {
# "input_ids": input_features,
"input_features": input_features,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
@require_torch
class Speech2TextModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
num_conv_layers=2,
conv_kernel_sizes=(5, 5),
conv_channels=32,
input_feat_per_channel=24,
input_channels=1,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
max_source_positions=20,
max_target_positions=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.num_conv_layers = num_conv_layers
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_channels = conv_channels
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_features = floats_tensor(
[self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size
)
attention_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2)
config = Speech2TextConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
num_conv_layers=self.num_conv_layers,
conv_kernel_sizes=self.conv_kernel_sizes,
conv_channels=self.conv_channels,
input_feat_per_channel=self.input_feat_per_channel,
input_channels=self.input_channels,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_source_positions=self.max_source_positions,
max_target_positions=self.max_target_positions,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
inputs_dict = prepare_speech_to_text_inputs_dict(
config,
input_features=input_features,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["decoder_input_ids"]
attention_mask = inputs_dict["decoder_attention_mask"]
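        # run one pass with use_cache=True, then check that feeding only the new tokens
        # together with past_key_values reproduces the full-sequence hidden states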
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens to input_ids and the new mask to attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = Speech2TextModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(
inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"]
)[0]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Speech2TextModel, Speech2TextForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Speech2TextForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_missing_keys = False
test_torchscript = True
input_name = "input_features"
def setUp(self):
self.model_tester = Speech2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Speech2TextConfig)
self.maxDiff = 3000
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
pass
# training is not supported yet
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_features = input_dict["input_features"]
attention_mask = input_dict["attention_mask"]
model = Speech2TextForConditionalGeneration(config).eval().to(torch_device)
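        # half precision is only exercised on CUDA; on CPU the generate calls still run in fp32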
if torch_device == "cuda":
input_features = input_features.half()
model.half()
model.generate(input_features, attention_mask=attention_mask)
model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_features",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
subsampled_seq_length = model._get_subsampled_output_lengths(seq_length)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[subsampled_seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
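            # the convolutional feature extractor subsamples the input, so encoder attention
            # shapes are checked against the subsampled sequence/key lengths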
subsampled_encoder_seq_length = model._get_subsampled_output_lengths(encoder_seq_length)
subsampled_encoder_key_length = model._get_subsampled_output_lengths(encoder_key_length)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
subsampled_encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
def test_resize_tokens_embeddings(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# make sure that decoder_input_ids are resized
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_resize_embeddings_untied(self):
(
original_config,
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
original_config.tie_word_embeddings = False
        # if the model cannot untie embeddings -> leave test
if original_config.tie_word_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
def test_generate_without_input_ids(self):
pass
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
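        # input_features are 3D (batch, time, feature); drop the feature dim so the
        # decoder start tokens built below have the usual 2D (batch, 1) shape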
input_ids = input_ids[:, :, 0]
input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape[:2]
subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length)
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
# encoder
self._check_encoder_attention_for_generate(
output.encoder_attentions, batch_size, config, subsampled_seq_length
)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, subsampled_seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
        configs_no_init = _config_zero_init(config)  # To be sure we have no NaN
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
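            # torch.jit.trace only accepts (tuples of) tensors, so the inputs dict is
            # unpacked into explicit positional tensor arguments below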
try:
                model.config.use_cache = False  # FSMT still requires this hack -> FSMT should probably be refactored similar to BART afterward
input_features = inputs["input_features"]
attention_mask = inputs["attention_mask"]
decoder_input_ids = inputs["decoder_input_ids"]
decoder_attention_mask = inputs["decoder_attention_mask"]
traced_model = torch.jit.trace(
model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask)
)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
@require_torch
@require_torchaudio
@require_sentencepiece
@require_tokenizers
@slow
class Speech2TextModelIntegrationTests(unittest.TestCase):
@cached_property
def default_processor(self):
return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
def _load_datasamples(self, num_samples):
from datasets import load_dataset
import soundfile as sf
# map files to raw
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
ds = ds.select(range(num_samples)).map(map_to_array)
return ds["speech"][:num_samples]
def test_generation_librispeech(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(1)
input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device)
generated_ids = model.generate(input_features)
generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS)
def test_generation_librispeech_batched(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(4)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_features = inputs.input_features.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
generated_ids = model.generate(input_features, attention_mask=attention_mask)
generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"sweat covered brion's body trickling into the titleing cloth that was the only garment he wore",
"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
"his instant of panic was followed by a small sharp blow high on his chest",
]
self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
|
AdaMix/tests/test_modeling_speech_to_text.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_speech_to_text.py",
"repo_id": "AdaMix",
"token_count": 14251
}
| 71 |
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
projection_dim=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.projection_dim = projection_dim
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor(
[self.batch_size, self.seq_length], vocab_size=2
) # follow test_modeling_tf_ctrl.py
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_dpr_context_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRContextEncoder(config=config)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_dpr_question_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRQuestionEncoder(config=config)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_dpr_reader(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRReader(config=config)
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
test_resize_embeddings = False
test_missing_keys = False
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFDPRModelTester(self)
self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_dpr_context_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
def test_dpr_question_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
def test_dpr_reader_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRQuestionEncoder.from_pretrained(model_name)
self.assertIsNotNone(model)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRReader.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
input_ids = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
) # [CLS] hello, is my dog cute? [SEP]
output = model(input_ids)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
]
)
self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
|
AdaMix/tests/test_modeling_tf_dpr.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_tf_dpr.py",
"repo_id": "AdaMix",
"token_count": 4521
}
| 72 |
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
from transformers import BartTokenizer
from transformers.file_utils import cached_property, is_datasets_available, is_faiss_available, is_tf_available
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available() and is_datasets_available() and is_faiss_available():
import tensorflow as tf
from datasets import Dataset
import faiss
from transformers import (
AutoConfig,
RagConfig,
RagRetriever,
RagTokenizer,
TFAutoModel,
TFAutoModelForSeq2SeqLM,
TFRagModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
from transformers.modeling_tf_outputs import TFBaseModelOutput
from .test_modeling_tf_bart import TFBartModelTester
from .test_modeling_tf_dpr import TFDPRModelTester
TOLERANCE = 1e-3
def require_retrieval(test_case):
"""
    Decorator marking a test that requires a set of dependencies necessary to perform retrieval with
    :class:`~transformers.RagRetriever`.
    These tests are skipped when the respective libraries are not installed.
"""
if not (is_tf_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case)
return test_case
@require_tf
@require_retrieval
@require_sentencepiece
class TFRagTestMixin:
all_model_classes = (
(TFRagModel, TFRagTokenForGeneration, TFRagSequenceForGeneration)
if is_tf_available() and is_datasets_available() and is_faiss_available()
else ()
)
all_generative_model_classes = (
(TFRagTokenForGeneration, TFRagSequenceForGeneration)
if is_tf_available() and is_datasets_available() and is_faiss_available()
else ()
)
retrieval_vector_size = 32
n_docs = 3
max_combined_length = 16
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
@cached_property
def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
@cached_property
def bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_retriever(self, config):
dataset = Dataset.from_dict(
{
"id": ["0", "1", "3"],
"text": ["foo", "bar", "qux"],
"title": ["Foo", "Bar", "Qux"],
"embeddings": [
np.ones(self.retrieval_vector_size),
2 * np.ones(self.retrieval_vector_size),
3 * np.ones(self.retrieval_vector_size),
],
}
)
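        # index the toy embeddings with an exact ("Flat") inner-product FAISS index so
        # retrieval over the three dummy documents is deterministic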
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
tokenizer = self.bart_tokenizer
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.dpr_tokenizer,
generator_tokenizer=tokenizer,
)
return retriever
def check_model_with_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_generate_from_context_input_ids(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for i, model_class in enumerate(self.all_generative_model_classes):
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
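            # doc_scores[i, j] is the inner product between question i and its j-th
            # retrieved document embedding, computed with a single batched matmul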
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model.generate(
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
)
self.assertIsNotNone(outputs)
def check_model_generate(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_generative_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
input_ids = tf.cast(input_ids, tf.int32)
outputs = model.generate(
input_ids=input_ids,
num_beams=2,
num_return_sequences=2,
decoder_start_token_id=config.generator.eos_token_id,
)
self.assertIsNotNone(outputs)
def check_model_without_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model(
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_custom_n_docs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
n_docs=n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model(
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=n_docs,
)
# logits
self.assertEqual(
outputs.logits.shape,
(n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs))
def check_model_with_mismatch_n_docs_value(
self,
config,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
retriever_n_docs,
generator_n_docs,
**kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
n_docs=retriever_n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
self.assertRaises(
AssertionError,
model.__call__,
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=generator_n_docs,
)
def check_model_with_encoder_outputs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
encoder_outputs = TFBaseModelOutput(outputs.generator_enc_last_hidden_state)
# run only generator
outputs = model(
input_ids=None,
encoder_outputs=encoder_outputs,
doc_scores=outputs.doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def test_model_with_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_with_retriever(**inputs_dict)
def test_model_without_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_without_retriever(**inputs_dict)
def test_model_generate_from_context_input_ids(self):
inputs_dict = self.config_and_inputs
self.check_model_generate_from_context_input_ids(**inputs_dict)
def test_model_with_encoder_outputs(self):
inputs_dict = self.config_and_inputs
self.check_model_with_encoder_outputs(**inputs_dict)
def test_model_generate(self):
inputs_dict = self.config_and_inputs
self.check_model_generate(**inputs_dict)
def test_model_with_custom_n_docs(self):
inputs_dict = self.config_and_inputs
inputs_dict["n_docs"] = 1
self.check_model_custom_n_docs(**inputs_dict)
def test_model_with_mismatch_n_docs_value(self):
inputs_dict = self.config_and_inputs
inputs_dict["retriever_n_docs"] = 3
inputs_dict["generator_n_docs"] = 2
self.check_model_with_mismatch_n_docs_value(**inputs_dict)
@require_tf
@require_retrieval
class TFRagDPRBartTest(TFRagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = TFDPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = TFBartModelTester(self)
bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, bart_inputs_dict) = bart_config_and_inputs
decoder_input_ids, decoder_attention_mask = bart_inputs_dict["input_ids"], bart_inputs_dict["attention_mask"]
config = RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
n_docs=self.n_docs,
retrieval_vector_size=self.retrieval_vector_size,
max_combined_length=self.max_combined_length,
)
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_tf
@require_retrieval
@require_sentencepiece
@require_tokenizers
class TFRagModelIntegrationTests(unittest.TestCase):
@cached_property
def token_model(self):
return TFRagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
@cached_property
def sequence_model(self):
return TFRagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
def token_model_nq_checkpoint(self, retriever):
return TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.7368])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.3557])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference_nq_checkpoint(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model_nq_checkpoint(retriever=rag_retriever)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
rag_token.save_pretrained(tmpdirname)
rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50265])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[62.9402, 62.7107, 62.2382, 62.1194, 61.8578]])
expected_loss = tf.convert_to_tensor([32.521812])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference_save_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
# model must run once to be functional before loading/saving works
rag_token(
input_ids,
labels=decoder_input_ids,
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
rag_token.save_pretrained(tmpdirname)
rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.3557])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_init_and_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_config = RagConfig.from_pretrained("facebook/rag-sequence-base")
rag = TFRagTokenForGeneration(rag_config, retriever=rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
rag(
input_ids,
decoder_input_ids=decoder_input_ids,
)
# this should not give any warnings
with tempfile.TemporaryDirectory() as tmpdirname:
rag.save_pretrained(tmpdirname)
rag = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
@property
def test_data_questions(self):
return [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
]
@slow
def test_rag_token_greedy_search(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
# check first two questions
input_dict = tokenizer(
self.test_data_questions[:2],
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
# make sure only 1 beam is used
rag_token.config.num_beams = 1
output_ids = rag_token.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22, 2017",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_token_generate_batch(self):
# NOTE: gold labels comes from num_beam=4, so this is effectively beam-search test
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
output_ids = rag_token.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22, 2017",
" amplitude modulation",
" stefan persson",
" april 20, 2018",
" the 1970s",
" 7.1. 2",
" 13",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_sequence_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
output_ids = rag_sequence.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_sequence_generate_batch_from_context_input_ids(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
question_hidden_states = rag_sequence.question_encoder(input_ids)[0]
docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
doc_scores = tf.squeeze(
tf.matmul(
tf.expand_dims(question_hidden_states, axis=[1]), docs_dict["retrieved_doc_embeds"], transpose_b=True
),
axis=[1],
)
output_ids = rag_sequence.generate(
context_input_ids=docs_dict["context_input_ids"],
context_attention_mask=docs_dict["context_attention_mask"],
doc_scores=doc_scores,
do_deduplication=True,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@require_tf
@require_retrieval
class TFRagModelSaveLoadTests(unittest.TestCase):
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_from_pretrained(self):
load_weight_prefix = "tf_rag_model_1"
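        # assumption: load_weight_prefix aligns the standalone generator's TF variable
        # names with those used inside the composite RAG model, so both builds below
        # end up with comparable weights (and hence nearly equal losses)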
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_sequence = TFRagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
)
# check that the from pretrained methods work
rag_sequence.save_pretrained(tmp_dirname)
rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_sequence
question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = TFAutoModelForSeq2SeqLM.from_pretrained(
"facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator"
)
rag_sequence = TFRagSequenceForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
@slow
def test_rag_token_from_pretrained(self):
load_weight_prefix = "tf_rag_model_1"
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_token = TFRagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
)
# check that the from pretrained methods work
rag_token.save_pretrained(tmp_dirname)
rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
output = rag_token(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_token
question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = TFAutoModelForSeq2SeqLM.from_pretrained(
"facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator"
)
rag_token = TFRagTokenForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
output = rag_token(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
|
AdaMix/tests/test_modeling_tf_rag.py/0
|
{
"file_path": "AdaMix/tests/test_modeling_tf_rag.py",
"repo_id": "AdaMix",
"token_count": 19383
}
| 73 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def testGradientAccumulator(self):
accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])
with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step, 3)
self.assertEqual(len(accumulator.gradients), 1)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step, 0)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
def testGradientAccumulatorDistributionStrategy(self):
context._context = None
ops.enable_eager_execution_internal()
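        # reset the eager context so the single physical CPU can be split into two
        # logical devices for MirroredStrategy below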
physical_devices = tf.config.list_physical_devices("CPU")
if len(physical_devices) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
)
devices = tf.config.list_logical_devices(device_type="CPU")
strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
with strategy.scope():
accumulator = GradientAccumulator()
variable = tf.Variable([4.0, 3.0])
optimizer, _ = create_optimizer(5e-5, 10, 5)
gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
def accumulate_on_replica(gradient):
accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
@tf.function
def accumulate(grad1, grad2):
with strategy.scope():
local_variables = strategy.experimental_local_results(gradient_placeholder)
local_variables[0].assign(grad1)
local_variables[1].assign(grad2)
strategy.run(accumulate_on_replica, args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(apply_on_replica)
def _check_local_values(grad1, grad2):
values = strategy.experimental_local_results(accumulator._gradients[0])
self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0], [-1.0, 1.0])
accumulate([3.0, -1.0], [-1.0, -1.0])
accumulate([-2.0, 2.0], [3.0, -2.0])
self.assertEqual(accumulator.step, 3)
_check_local_values([2.0, 3.0], [1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step, 0)
_check_local_values([0.0, 0.0], [0.0, 0.0])
|
AdaMix/tests/test_optimization_tf.py/0
|
{
"file_path": "AdaMix/tests/test_optimization_tf.py",
"repo_id": "AdaMix",
"token_count": 1782
}
| 74 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import (
require_datasets,
require_faiss,
require_sentencepiece,
require_tokenizers,
require_torch,
)
if is_faiss_available():
import faiss
@require_faiss
@require_datasets
class RagRetrieverTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_dummy_dataset(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
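    # Hedged note (explanatory sketch, not original code): with
    # faiss.METRIC_INNER_PRODUCT the best document is simply the one maximizing
    # the dot product with the query, which the retrieve tests below rely on:
    #
    #     scores = embeddings @ query        # inner product per document
    #     top_doc = int(np.argmax(scores))   # doc "1" ("bar") for an all-ones query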
def get_dummy_canonical_hf_index_retriever(self):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
)
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
return retriever
def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="custom",
)
if from_disk:
config.passages_path = os.path.join(self.tmpdirname, "dataset")
config.index_path = os.path.join(self.tmpdirname, "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
del dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
else:
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
index=CustomHFIndex(config.retrieval_vector_size, dataset),
)
return retriever
def get_dummy_legacy_index_retriever(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(passages, open(passages_file_name, "wb"))
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="legacy",
index_path=self.tmpdirname,
)
retriever = RagRetriever(
config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
)
return retriever
def test_canonical_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = self.get_dummy_dataset()
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_custom_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_custom_hf_index_retriever_retrieve_from_disk(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_legacy_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_legacy_index_retriever()
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
self.assertEqual(doc_dicts[0]["text"][0], "bar") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0], "foo") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_hf_index_retriever_call(self):
import torch
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
question_input_ids = [[5, 7], [10, 11]]
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, list)
self.assertIsInstance(context_attention_mask, list)
self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
out = retriever(
question_input_ids,
hidden_states,
prefix=retriever.config.generator.prefix,
n_docs=n_docs,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, torch.Tensor)
self.assertIsInstance(context_attention_mask, torch.Tensor)
self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
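    # Illustrative usage sketch (assumptions, not part of the original tests): the
    # same __call__ interface is used at generation time, returning generator-ready
    # context tensors for each (question, retrieved document) pair:
    #
    #     out = retriever(question_input_ids, hidden_states, prefix=None, n_docs=5,
    #                     return_tensors="np")
    #     context = out["context_input_ids"]  # one row per (question, doc) pair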
|
AdaMix/tests/test_retrieval_rag.py/0
|
{
"file_path": "AdaMix/tests/test_retrieval_rag.py",
"repo_id": "AdaMix",
"token_count": 7696
}
| 75 |
# coding=utf-8
# Copyright 2019 Hugging Face inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import DebertaV2Tokenizer
from transformers.testing_utils import require_sentencepiece, require_tokenizers
from .test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = DebertaV2Tokenizer
rust_tokenizer_class = None
test_rust_tokenizer = False
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = "this is a test"
output_text = "this is a test"
return input_text, output_text
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁", "[UNK]", "his", "▁is", "▁a", "▁test"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [13, 1, 4398, 25, 21, 1289])
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
# fmt: off
self.assertListEqual(
tokens,
["▁", "[UNK]", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "[UNK]", "."],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
)
# fmt: on
def test_sequence_builders(self):
tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
tokenizer.sep_token_id
]
def test_tokenizer_integration(self):
tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-xlarge-v2")
sequences = [
[
"DeBERTa: Decoding-enhanced BERT with Disentangled Attention",
"DeBERTa: Decoding-enhanced BERT with Disentangled Attention",
],
[
"Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks.",
"DeBERTa: Decoding-enhanced BERT with Disentangled Attention",
],
[
"In this paper we propose a new model architecture DeBERTa",
"DeBERTa: Decoding-enhanced BERT with Disentangled Attention",
],
]
encoding = tokenizer(sequences, padding=True)
decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
expected_encoding = {
'input_ids': [
[1, 1804, 69418, 191, 43, 117056, 18, 44596, 448, 37132, 19, 8655, 10625, 69860, 21149, 2, 1804, 69418, 191, 43, 117056, 18, 44596, 448, 37132, 19, 8655, 10625, 69860, 21149, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 9755, 1944, 11, 1053, 18, 16899, 12730, 1072, 1506, 45, 2497, 2510, 5, 610, 9, 127, 699, 1072, 2101, 36, 99388, 53, 2930, 4, 2, 1804, 69418, 191, 43, 117056, 18, 44596, 448, 37132, 19, 8655, 10625, 69860, 21149, 2],
[1, 84, 32, 778, 42, 9441, 10, 94, 735, 3372, 1804, 69418, 191, 2, 1804, 69418, 191, 43, 117056, 18, 44596, 448, 37132, 19, 8655, 10625, 69860, 21149, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
}
expected_decoded_sequences = [
'DeBERTa: Decoding-enhanced BERT with Disentangled Attention DeBERTa: Decoding-enhanced BERT with Disentangled Attention',
'Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. DeBERTa: Decoding-enhanced BERT with Disentangled Attention',
'In this paper we propose a new model architecture DeBERTa DeBERTa: Decoding-enhanced BERT with Disentangled Attention'
]
# fmt: on
self.assertDictEqual(encoding.data, expected_encoding)
for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
self.assertEqual(expected, decoded)
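    # Hedged round-trip sketch (illustrative, not part of the original test):
    # encoding and then decoding with skip_special_tokens=True should recover the
    # input up to tokenizer normalization, which is what the loop above verifies:
    #
    #     ids = tokenizer("DeBERTa: Decoding-enhanced BERT")["input_ids"]
    #     text = tokenizer.decode(ids, skip_special_tokens=True)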
|
AdaMix/tests/test_tokenization_deberta_v2.py/0
|
{
"file_path": "AdaMix/tests/test_tokenization_deberta_v2.py",
"repo_id": "AdaMix",
"token_count": 3660
}
| 76 |
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from .test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PhobertTokenizer
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l à</w>"]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write("{} {}".format(token, vocab_tokens[token]) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "Tôi là VinAI Research"
output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "Tôi là VinAI Research"
bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
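    # Hedged BPE note (illustrative, not part of the original test): PhoBERT marks
    # word-internal pieces with a trailing "@@", and any piece missing from the tiny
    # fixture vocab maps to the unknown id 3, e.g.:
    #
    #     tokenizer.tokenize("Tôi")  # -> ["T@@", "ô@@", "i"]; "ô@@" resolves to id 3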
|
AdaMix/tests/test_tokenization_phobert.py/0
|
{
"file_path": "AdaMix/tests/test_tokenization_phobert.py",
"repo_id": "AdaMix",
"token_count": 1159
}
| 77 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_tokenizers, slow
from .test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLNetTokenizer
rust_tokenizer_class = XLNetTokenizerFast
test_rust_tokenizer = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
],
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
],
)
@slow
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
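    # Hedged note (illustrative, not part of the original test): unlike BERT, XLNet
    # appends its special tokens, so a single sequence ends with
    # [sep_token_id, cls_token_id] == [4, 3] and a pair inserts an extra [4] between
    # the two segments, exactly as the asserts above spell out.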
|
AdaMix/tests/test_tokenization_xlnet.py/0
|
{
"file_path": "AdaMix/tests/test_tokenization_xlnet.py",
"repo_id": "AdaMix",
"token_count": 3308
}
| 78 |
""" Script for downloading all GLUE data.
Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
"""
import argparse
import os
import sys
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {
"CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4",
"SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8",
"MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc",
"QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5",
"STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5",
"MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce",
"SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df",
"QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601",
"RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb",
"WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf",
"diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D",
}
MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
def download_and_extract(task, data_dir):
print("Downloading and extracting %s..." % task)
data_file = "%s.zip" % task
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(data_dir)
os.remove(data_file)
print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(data_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split("\t"))
with open(mrpc_train_file, encoding="utf8") as data_fh, open(
os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8"
) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split("\t")
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with open(mrpc_test_file, encoding="utf8") as data_fh, open(
os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8"
) as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split("\t")
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
def download_diagnostic(data_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
os.mkdir(os.path.join(data_dir, "diagnostic"))
data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def get_tasks(task_names):
task_names = task_names.split(",")
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
assert task_name in TASKS, "Task %s not found!" % task_name
tasks.append(task_name)
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data")
parser.add_argument(
"--tasks", help="tasks to download data for as a comma separated string", type=str, default="all"
)
parser.add_argument(
"--path_to_mrpc",
help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt",
type=str,
default="",
)
args = parser.parse_args(arguments)
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if task == "MRPC":
format_mrpc(args.data_dir, args.path_to_mrpc)
elif task == "diagnostic":
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
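# Example invocations (illustrative, using the argparse flags defined above):
#
#     python download_glue_data.py --data_dir glue_data --tasks all
#     python download_glue_data.py --tasks MRPC --path_to_mrpc /path/to/MRPC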
|
AdaMix/utils/download_glue_data.py/0
|
{
"file_path": "AdaMix/utils/download_glue_data.py",
"repo_id": "AdaMix",
"token_count": 3870
}
| 79 |
#!/bin/bash
# **Usage**
# - for running default image, training binaries, in windowed mode:
# `$ ./run_docker_image.sh "" training`
# - for running default image, training binaries, in headless mode:
# `$ ./run_docker_image.sh "" training headless`
# - for running a custom image in windowed mode, pass in your image name and tag:
#   `$ ./run_docker_image.sh DOCKER_IMAGE_NAME:TAG`
# - for running a custom image in headless mode, pass in your image name and tag, followed by "headless":
#   `$ ./run_docker_image.sh DOCKER_IMAGE_NAME:TAG headless`
# This script takes two optional arguments
# 1st argument is the name (and tag) of the dockerfile to run
# by default, it is set to "adrl:10.0-devel-ubuntu18.04"
# else user can specify a docker image as follows:
# $ ./run_docker_image.sh DOCKER_IMAGE_NAME:TAG
DOCKER_IMAGE_NAME=${1:-adrl:10.0-devel-ubuntu18.04}
# 2nd argument: if user passes "headless", binary runs in headless mode
IS_HEADLESS=${2:-notheadless}
# this block is for running X apps in docker
XAUTH=/tmp/.docker.xauth
if [[ ! -f $XAUTH ]]
then
xauth_list=$(xauth nlist :0 | sed -e 's/^..../ffff/')
if [ ! -z "$xauth_list" ]
then
echo $xauth_list | xauth -f $XAUTH nmerge -
else
touch $XAUTH
fi
chmod a+r $XAUTH
fi
# optionally, use one of the commented-out commands below for different options
UNREAL_BINARY_COMMAND="bash /home/airsim_user/ADRL/ADRL.sh -windowed -opengl"
# eliminate terminal output and run the AirSim process in the background
# UNREAL_BINARY_COMMAND="bash /home/airsim_user/ADRL/ADRL.sh -windowed -opengl &>/dev/null &"
# set window resolution
# UNREAL_BINARY_COMMAND="/home/airsim_user/ADRL/ADRL.sh -windowed -ResX=1080 -ResY=720"
# now, let's check if we need to run in headless mode or not
# set SDL_VIDEODRIVER_VALUE to '' if windowed mode, 'offscreen' if headless mode
SDL_VIDEODRIVER_VALUE='';
if [[ $2 = "headless" ]]; then
SDL_VIDEODRIVER_VALUE='offscreen';
fi
# now, set the environment variable SDL_VIDEODRIVER to SDL_VIDEODRIVER_VALUE
# and tell the docker container to execute UNREAL_BINARY_COMMAND
nvidia-docker run -it \
-e SDL_VIDEODRIVER=$SDL_VIDEODRIVER_VALUE \
-e SDL_HINT_CUDA_DEVICE='0' \
--net=host \
--env="DISPLAY=$DISPLAY" \
--env="QT_X11_NO_MITSHM=1" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
-env="XAUTHORITY=$XAUTH" \
--volume="$XAUTH:$XAUTH" \
--runtime=nvidia \
--rm \
$DOCKER_IMAGE_NAME \
/bin/bash -c "$UNREAL_BINARY_COMMAND"
|
AirSim-Drone-Racing-Lab/docker/run_docker_image.sh/0
|
{
"file_path": "AirSim-Drone-Racing-Lab/docker/run_docker_image.sh",
"repo_id": "AirSim-Drone-Racing-Lab",
"token_count": 1017
}
| 80 |
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, BatchNormalization, Lambda, Concatenate, Conv2DTranspose, Reshape, ReLU
class NonLinearTransformer(Model):
def __init__(self):
super(NonLinearTransformer, self).__init__()
self.create_model()
def call(self, x):
return self.network(x)
def create_model(self):
print('[NonLinearTransformer] Starting create_model')
dense0 = tf.keras.layers.Dense(units=64, activation='relu')
dense1 = tf.keras.layers.Dense(units=32, activation='relu')
dense2 = tf.keras.layers.Dense(units=1, activation='linear')
self.network = tf.keras.Sequential([
dense0,
dense1,
dense2],
name='nonlineartransformer')
print('[NonLinearTransformer] Done with create_model')
class TestNet(Model):
def __init__(self):
super(TestNet, self).__init__()
self.create_model()
def call(self, x):
x = tf.keras.layers.Flatten()(x)
return self.network(x)
def create_model(self):
        print('[TestNet] Starting create_model')
dense0 = tf.keras.layers.Dense(units=64, activation='relu')
dense1 = tf.keras.layers.Dense(units=32, activation='relu')
dense2 = tf.keras.layers.Dense(units=1, activation='linear')
self.network = tf.keras.Sequential([
dense0,
dense1,
dense2],
            name='testnet')
        print('[TestNet] Done with create_model')
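# Minimal usage sketch (illustrative, not part of the original module): both models
# map a batch of feature vectors to one scalar each via two hidden Dense layers.
#
#     model = NonLinearTransformer()
#     y = model(tf.ones((4, 16)))  # -> tensor of shape (4, 1)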
|
AirSim-Drone-Racing-VAE-Imitation/racing_models/transformer.py/0
|
{
"file_path": "AirSim-Drone-Racing-VAE-Imitation/racing_models/transformer.py",
"repo_id": "AirSim-Drone-Racing-VAE-Imitation",
"token_count": 717
}
| 81 |
#!/bin/sh
# This script builds wheels for the API, SDK, and extension packages in the
# dist/ dir, to be uploaded to PyPI.
set -ev
# Get the latest versions of packaging tools
python -m pip install --upgrade pip build setuptools wheel
BASEDIR=$(dirname $(readlink -f $(dirname $0)))
DISTDIR=dist
(
cd $BASEDIR
mkdir -p $DISTDIR
rm -rf $DISTDIR/*
for d in azure-monitor-opentelemetry; do
(
echo "building $d"
cd "$d"
# Package distribution in dist folder
python setup.py sdist --dist-dir "$BASEDIR/dist/" clean --all
)
done
# Build a wheel for each source distribution
(
cd $DISTDIR
for x in *.tar.gz ; do
pip wheel --no-deps $x
done
)
)
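# Example invocation (illustrative): run from anywhere; BASEDIR above resolves to
# the repository root, so wheels always land in <repo-root>/dist/.
#
#     ./scripts/build.sh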
|
ApplicationInsights-Python/scripts/build.sh/0
|
{
"file_path": "ApplicationInsights-Python/scripts/build.sh",
"repo_id": "ApplicationInsights-Python",
"token_count": 256
}
| 82 |
{
"MD004": false,
"MD007": {
"indent": 2
},
"MD013": {
"line_length": 400
},
"MD026": {
"punctuation": ".,;:!。,;:"
},
"MD029": false,
"MD033": false,
"MD036": false,
"blank_lines": false
}
|
AzureTRE/.markdownlint.json/0
|
{
"file_path": "AzureTRE/.markdownlint.json",
"repo_id": "AzureTRE",
"token_count": 115
}
| 83 |
{
"scriptFile": "__init__.py",
"entryPoint": "main",
"bindings": [
{
"name": "msg",
"type": "serviceBusTrigger",
"direction": "in",
"topicName": "%BLOB_CREATED_TOPIC_NAME%",
"subscriptionName": "%TOPIC_SUBSCRIPTION_NAME%",
"connection": "SB_CONNECTION_STRING"
},
{
"type": "eventGrid",
"name": "stepResultEvent",
"topicEndpointUri": "EVENT_GRID_STEP_RESULT_TOPIC_URI_SETTING",
"topicKeySetting": "EVENT_GRID_STEP_RESULT_TOPIC_KEY_SETTING",
"direction": "out"
},
{
"type": "eventGrid",
"name": "dataDeletionEvent",
"topicEndpointUri": "EVENT_GRID_DATA_DELETION_TOPIC_URI_SETTING",
"topicKeySetting": "EVENT_GRID_DATA_DELETION_TOPIC_KEY_SETTING",
"direction": "out"
}
]
}
|
AzureTRE/airlock_processor/BlobCreatedTrigger/function.json/0
|
{
"file_path": "AzureTRE/airlock_processor/BlobCreatedTrigger/function.json",
"repo_id": "AzureTRE",
"token_count": 378
}
| 84 |
# RG
CORE_RESOURCE_GROUP_NAME = "rg-{}"
WORKSPACE_RESOURCE_GROUP_NAME = "rg-{}-ws-{}"
IMPORT_TYPE = "import"
EXPORT_TYPE = "export"
# Import
STORAGE_ACCOUNT_NAME_IMPORT_EXTERNAL = "stalimex"
STORAGE_ACCOUNT_NAME_IMPORT_INPROGRESS = "stalimip"
STORAGE_ACCOUNT_NAME_IMPORT_APPROVED = "stalimappws"
STORAGE_ACCOUNT_NAME_IMPORT_REJECTED = "stalimrej"
STORAGE_ACCOUNT_NAME_IMPORT_BLOCKED = "stalimblocked"
# Export
STORAGE_ACCOUNT_NAME_EXPORT_INTERNAL = "stalexintws"
STORAGE_ACCOUNT_NAME_EXPORT_INPROGRESS = "stalexipws"
STORAGE_ACCOUNT_NAME_EXPORT_APPROVED = "stalexapp"
STORAGE_ACCOUNT_NAME_EXPORT_REJECTED = "stalexrejws"
STORAGE_ACCOUNT_NAME_EXPORT_BLOCKED = "stalexblockedws"
# Stages
STAGE_DRAFT = "draft"
STAGE_SUBMITTED = "submitted"
STAGE_IN_REVIEW = "in_review"
STAGE_APPROVAL_INPROGRESS = "approval_in_progress"
STAGE_APPROVED = "approved"
STAGE_REJECTION_INPROGRESS = "rejection_in_progress"
STAGE_REJECTED = "rejected"
STAGE_CANCELLED = "cancelled"
STAGE_BLOCKING_INPROGRESS = "blocking_in_progress"
STAGE_BLOCKED_BY_SCAN = "blocked_by_scan"
STAGE_FAILED = "failed"
# Messages
NO_FILES_IN_REQUEST_MESSAGE = "Request did not contain any files."
TOO_MANY_FILES_IN_REQUEST_MESSAGE = "Request contained more than 1 file."
UNKNOWN_REASON_MESSAGE = "Request failed due to an unknown reason."
# Event Grid
STEP_RESULT_EVENT_DATA_VERSION = "1.0"
DATA_DELETION_EVENT_DATA_VERSION = "1.0"
NO_THREATS = "No threats found"
|
AzureTRE/airlock_processor/shared_code/constants.py/0
|
{
"file_path": "AzureTRE/airlock_processor/shared_code/constants.py",
"repo_id": "AzureTRE",
"token_count": 609
}
| 85 |
import asyncio
from fastapi import APIRouter, Depends, HTTPException, Header, status, Response
from jsonschema.exceptions import ValidationError
from db.repositories.operations import OperationRepository
from db.errors import DuplicateEntity, MajorVersionUpdateDenied, UserNotAuthorizedToUseTemplate, TargetTemplateVersionDoesNotExist, VersionDowngradeDenied
from api.helpers import get_repository
from api.dependencies.shared_services import get_shared_service_by_id_from_path, get_operation_by_id_from_path
from db.repositories.resource_templates import ResourceTemplateRepository
from db.repositories.resources_history import ResourceHistoryRepository
from db.repositories.shared_services import SharedServiceRepository
from models.domain.resource import ResourceType
from models.schemas.operation import OperationInList, OperationInResponse
from models.schemas.shared_service import RestrictedSharedServiceInResponse, RestrictedSharedServicesInList, SharedServiceInCreate, SharedServicesInList, SharedServiceInResponse
from models.schemas.resource import ResourceHistoryInList, ResourcePatch
from resources import strings
from .workspaces import save_and_deploy_resource, construct_location_header
from azure.cosmos.exceptions import CosmosAccessConditionFailedError
from .resource_helpers import enrich_resource_with_available_upgrades, send_custom_action_message, send_uninstall_message, send_resource_request_message
from services.authentication import get_current_admin_user, get_current_tre_user_or_tre_admin
from models.domain.request_action import RequestAction
from services.logging import logger
shared_services_router = APIRouter(dependencies=[Depends(get_current_tre_user_or_tre_admin)])
def user_is_tre_admin(user):
    return "TREAdmin" in user.roles
@shared_services_router.get("/shared-services", response_model=SharedServicesInList, name=strings.API_GET_ALL_SHARED_SERVICES, dependencies=[Depends(get_current_tre_user_or_tre_admin)])
async def retrieve_shared_services(shared_services_repo=Depends(get_repository(SharedServiceRepository)), user=Depends(get_current_tre_user_or_tre_admin), resource_template_repo=Depends(get_repository(ResourceTemplateRepository))) -> SharedServicesInList:
shared_services = await shared_services_repo.get_active_shared_services()
await asyncio.gather(*[enrich_resource_with_available_upgrades(shared_service, resource_template_repo) for shared_service in shared_services])
if user_is_tre_admin(user):
return SharedServicesInList(sharedServices=shared_services)
else:
return RestrictedSharedServicesInList(sharedServices=shared_services)
@shared_services_router.get("/shared-services/{shared_service_id}", response_model=SharedServiceInResponse, name=strings.API_GET_SHARED_SERVICE_BY_ID, dependencies=[Depends(get_current_tre_user_or_tre_admin), Depends(get_shared_service_by_id_from_path)])
async def retrieve_shared_service_by_id(shared_service=Depends(get_shared_service_by_id_from_path), user=Depends(get_current_tre_user_or_tre_admin), resource_template_repo=Depends(get_repository(ResourceTemplateRepository))):
await enrich_resource_with_available_upgrades(shared_service, resource_template_repo)
if user_is_tre_admin(user):
return SharedServiceInResponse(sharedService=shared_service)
else:
return RestrictedSharedServiceInResponse(sharedService=shared_service)
@shared_services_router.post("/shared-services", status_code=status.HTTP_202_ACCEPTED, response_model=OperationInResponse, name=strings.API_CREATE_SHARED_SERVICE, dependencies=[Depends(get_current_admin_user)])
async def create_shared_service(response: Response, shared_service_input: SharedServiceInCreate, user=Depends(get_current_admin_user), shared_services_repo=Depends(get_repository(SharedServiceRepository)), resource_template_repo=Depends(get_repository(ResourceTemplateRepository)), operations_repo=Depends(get_repository(OperationRepository)), resource_history_repo=Depends(get_repository(ResourceHistoryRepository))) -> OperationInResponse:
try:
shared_service, resource_template = await shared_services_repo.create_shared_service_item(shared_service_input, user.roles)
except (ValidationError, ValueError) as e:
logger.exception("Failed create shared service model instance")
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
except DuplicateEntity as e:
logger.exception("Shared service already exists")
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e))
except UserNotAuthorizedToUseTemplate as e:
logger.exception("User not authorized to use template")
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=str(e))
operation = await save_and_deploy_resource(
resource=shared_service,
resource_repo=shared_services_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=user,
resource_template=resource_template)
response.headers["Location"] = construct_location_header(operation)
return OperationInResponse(operation=operation)
@shared_services_router.patch("/shared-services/{shared_service_id}",
status_code=status.HTTP_202_ACCEPTED,
response_model=OperationInResponse,
name=strings.API_UPDATE_SHARED_SERVICE,
dependencies=[Depends(get_current_admin_user), Depends(get_shared_service_by_id_from_path)])
async def patch_shared_service(shared_service_patch: ResourcePatch, response: Response, user=Depends(get_current_admin_user), shared_service_repo=Depends(get_repository(SharedServiceRepository)), resource_history_repo=Depends(get_repository(ResourceHistoryRepository)), shared_service=Depends(get_shared_service_by_id_from_path), resource_template_repo=Depends(get_repository(ResourceTemplateRepository)), operations_repo=Depends(get_repository(OperationRepository)), etag: str = Header(...), force_version_update: bool = False) -> SharedServiceInResponse:
try:
patched_shared_service, _ = await shared_service_repo.patch_shared_service(shared_service, shared_service_patch, etag, resource_template_repo, resource_history_repo, user, force_version_update)
operation = await send_resource_request_message(
resource=patched_shared_service,
operations_repo=operations_repo,
resource_repo=shared_service_repo,
user=user,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
action=RequestAction.Upgrade)
response.headers["Location"] = construct_location_header(operation)
return OperationInResponse(operation=operation)
except CosmosAccessConditionFailedError:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=strings.ETAG_CONFLICT)
except ValidationError as v:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=v.message)
except (MajorVersionUpdateDenied, TargetTemplateVersionDoesNotExist, VersionDowngradeDenied) as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
@shared_services_router.delete("/shared-services/{shared_service_id}", response_model=OperationInResponse, name=strings.API_DELETE_SHARED_SERVICE, dependencies=[Depends(get_current_admin_user)])
async def delete_shared_service(response: Response, user=Depends(get_current_admin_user), shared_service=Depends(get_shared_service_by_id_from_path), operations_repo=Depends(get_repository(OperationRepository)), shared_service_repo=Depends(get_repository(SharedServiceRepository)), resource_template_repo=Depends(get_repository(ResourceTemplateRepository)), resource_history_repo=Depends(get_repository(ResourceHistoryRepository))) -> OperationInResponse:
if shared_service.isEnabled:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.SHARED_SERVICE_NEEDS_TO_BE_DISABLED_BEFORE_DELETION)
operation = await send_uninstall_message(
resource=shared_service,
resource_repo=shared_service_repo,
operations_repo=operations_repo,
resource_type=ResourceType.SharedService,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=user)
response.headers["Location"] = construct_location_header(operation)
return OperationInResponse(operation=operation)
@shared_services_router.post("/shared-services/{shared_service_id}/invoke-action", status_code=status.HTTP_202_ACCEPTED, response_model=OperationInResponse, name=strings.API_INVOKE_ACTION_ON_SHARED_SERVICE, dependencies=[Depends(get_current_admin_user)])
async def invoke_action_on_shared_service(response: Response, action: str, user=Depends(get_current_admin_user), shared_service=Depends(get_shared_service_by_id_from_path), resource_template_repo=Depends(get_repository(ResourceTemplateRepository)), operations_repo=Depends(get_repository(OperationRepository)), shared_service_repo=Depends(get_repository(SharedServiceRepository)), resource_history_repo=Depends(get_repository(ResourceHistoryRepository))) -> OperationInResponse:
operation = await send_custom_action_message(
resource=shared_service,
resource_repo=shared_service_repo,
custom_action=action,
resource_type=ResourceType.SharedService,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=user)
response.headers["Location"] = construct_location_header(operation)
return OperationInResponse(operation=operation)
# Shared service operations
@shared_services_router.get("/shared-services/{shared_service_id}/operations", response_model=OperationInList, name=strings.API_GET_RESOURCE_OPERATIONS, dependencies=[Depends(get_current_admin_user), Depends(get_shared_service_by_id_from_path)])
async def retrieve_shared_service_operations_by_shared_service_id(shared_service=Depends(get_shared_service_by_id_from_path), operations_repo=Depends(get_repository(OperationRepository))) -> OperationInList:
return OperationInList(operations=await operations_repo.get_operations_by_resource_id(resource_id=shared_service.id))
@shared_services_router.get("/shared-services/{shared_service_id}/operations/{operation_id}", response_model=OperationInResponse, name=strings.API_GET_RESOURCE_OPERATION_BY_ID, dependencies=[Depends(get_current_admin_user), Depends(get_shared_service_by_id_from_path)])
async def retrieve_shared_service_operation_by_shared_service_id_and_operation_id(shared_service=Depends(get_shared_service_by_id_from_path), operation=Depends(get_operation_by_id_from_path)) -> OperationInResponse:
return OperationInResponse(operation=operation)
# Shared service history
@shared_services_router.get("/shared-services/{shared_service_id}/history", response_model=ResourceHistoryInList, name=strings.API_GET_RESOURCE_HISTORY, dependencies=[Depends(get_current_admin_user)])
async def retrieve_shared_service_history_by_shared_service_id(shared_service=Depends(get_shared_service_by_id_from_path), resource_history_repo=Depends(get_repository(ResourceHistoryRepository))) -> ResourceHistoryInList:
return ResourceHistoryInList(resource_history=await resource_history_repo.get_resource_history_by_resource_id(resource_id=shared_service.id))
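# Hedged request sketch (illustrative; header and route names follow the handlers
# above): PATCH requires the current etag for optimistic concurrency, and every
# async operation returns its status URL in the Location response header.
#
#     PATCH /shared-services/{shared_service_id}
#     etag: "<current-etag>"
#     {"isEnabled": false}   # -> 202 Accepted + Location: .../operations/{operation_id}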
|
AzureTRE/api_app/api/routes/shared_services.py/0
|
{
"file_path": "AzureTRE/api_app/api/routes/shared_services.py",
"repo_id": "AzureTRE",
"token_count": 3867
}
| 86 |
import copy
import uuid
from datetime import datetime
from typing import List, Optional
from pydantic import UUID4
from azure.cosmos.exceptions import CosmosResourceNotFoundError, CosmosAccessConditionFailedError
from fastapi import HTTPException, status
from pydantic import parse_obj_as
from models.domain.authentication import User
from db.errors import EntityDoesNotExist
from models.domain.airlock_request import AirlockFile, AirlockRequest, AirlockRequestStatus, \
AirlockReview, AirlockReviewDecision, AirlockRequestHistoryItem, AirlockRequestType, AirlockReviewUserResource
from models.schemas.airlock_request import AirlockRequestInCreate, AirlockReviewInCreate
from core import config
from resources import strings
from db.repositories.base import BaseRepository
from services.logging import logger
class AirlockRequestRepository(BaseRepository):
@classmethod
async def create(cls):
cls = AirlockRequestRepository()
await super().create(config.STATE_STORE_AIRLOCK_REQUESTS_CONTAINER)
return cls
@staticmethod
def get_resource_base_spec_params():
return {"tre_id": config.TRE_ID}
def get_timestamp(self) -> float:
return datetime.utcnow().timestamp()
async def update_airlock_request_item(self, original_request: AirlockRequest, new_request: AirlockRequest, updated_by: User, request_properties: dict) -> AirlockRequest:
history_item = AirlockRequestHistoryItem(
resourceVersion=original_request.resourceVersion,
updatedWhen=original_request.updatedWhen,
updatedBy=original_request.updatedBy,
properties=request_properties
)
new_request.history.append(history_item)
# now update the request props
new_request.resourceVersion = new_request.resourceVersion + 1
new_request.updatedBy = updated_by
new_request.updatedWhen = self.get_timestamp()
await self.upsert_item_with_etag(new_request, new_request.etag)
return new_request
@staticmethod
def airlock_requests_query():
return 'SELECT * FROM c'
def validate_status_update(self, current_status: AirlockRequestStatus, new_status: AirlockRequestStatus):
# Cannot change status from approved
approved_condition = current_status != AirlockRequestStatus.Approved
# Cannot change status from rejected
rejected_condition = current_status != AirlockRequestStatus.Rejected
# Cannot change status from blocked
blocked_condition = current_status != AirlockRequestStatus.Blocked
# If approved-in-progress can only be changed to approved
approved_in_progress_condition = current_status == AirlockRequestStatus.ApprovalInProgress and new_status == AirlockRequestStatus.Approved
# If rejection-in-progress can only be changed to rejected
rejected_in_progress_condition = current_status == AirlockRequestStatus.RejectionInProgress and new_status == AirlockRequestStatus.Rejected
# If blocking-in-progress can only be changed to blocked
blocking_in_progress_condition = current_status == AirlockRequestStatus.BlockingInProgress and new_status == AirlockRequestStatus.Blocked
# If draft can only be changed to submitted
draft_condition = current_status == AirlockRequestStatus.Draft and new_status == AirlockRequestStatus.Submitted
        # If submitted, the request must be scanned first, which moves the status to either in-review or blocking-in-progress; if the scanner is disabled, it can go straight to in-review
submit_condition = current_status == AirlockRequestStatus.Submitted and (new_status == AirlockRequestStatus.InReview or new_status == AirlockRequestStatus.BlockingInProgress)
# If in review can only be changed to either approve in progress or rejected in progress
in_review_condition = current_status == AirlockRequestStatus.InReview and (new_status == AirlockRequestStatus.ApprovalInProgress or new_status == AirlockRequestStatus.RejectionInProgress)
# Cancel is allowed only if the request is not actively changing, i.e. it is currently in draft or in review
cancel_condition = (current_status == AirlockRequestStatus.Draft or current_status == AirlockRequestStatus.InReview) and new_status == AirlockRequestStatus.Cancelled
# Failed is allowed from any non-final status
failed_condition = (current_status == AirlockRequestStatus.Draft
or current_status == AirlockRequestStatus.Submitted
or current_status == AirlockRequestStatus.InReview
or current_status == AirlockRequestStatus.ApprovalInProgress
or current_status == AirlockRequestStatus.RejectionInProgress
or current_status == AirlockRequestStatus.BlockingInProgress) and new_status == AirlockRequestStatus.Failed
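        # a transition is allowed only when the current status is not final (approved/rejected/blocked) and one of the transition rules above matches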
return approved_condition and rejected_condition and blocked_condition and (approved_in_progress_condition or rejected_in_progress_condition or blocking_in_progress_condition or draft_condition or submit_condition or in_review_condition or cancel_condition or failed_condition)
def create_airlock_request_item(self, airlock_request_input: AirlockRequestInCreate, workspace_id: str, user) -> AirlockRequest:
full_airlock_request_id = str(uuid.uuid4())
resource_spec_parameters = {**self.get_airlock_request_spec_params()}
airlock_request = AirlockRequest(
id=full_airlock_request_id,
workspaceId=workspace_id,
title=airlock_request_input.title,
businessJustification=airlock_request_input.businessJustification,
type=airlock_request_input.type,
createdBy=user,
createdWhen=datetime.utcnow().timestamp(),
updatedBy=user,
updatedWhen=datetime.utcnow().timestamp(),
properties=resource_spec_parameters,
reviews=[]
)
return airlock_request
async def get_airlock_requests(self, workspace_id: str, creator_user_id: Optional[str] = None, type: Optional[AirlockRequestType] = None, status: Optional[AirlockRequestStatus] = None, order_by: Optional[str] = None, order_ascending=True) -> List[AirlockRequest]:
query = self.airlock_requests_query() + f' WHERE c.workspaceId = "{workspace_id}"'
# optional filters
if creator_user_id:
query += ' AND c.createdBy.id=@user_id'
if status:
query += ' AND c.status=@status'
if type:
query += ' AND c.type=@type'
# optional sorting
if order_by:
query += ' ORDER BY c.' + order_by
query += ' ASC' if order_ascending else ' DESC'
parameters = [
{"name": "@user_id", "value": creator_user_id},
{"name": "@status", "value": status},
{"name": "@type", "value": type},
]
airlock_requests = await self.query(query=query, parameters=parameters)
return parse_obj_as(List[AirlockRequest], airlock_requests)
async def get_airlock_request_by_id(self, airlock_request_id: UUID4) -> AirlockRequest:
        try:
            airlock_request = await self.read_item_by_id(str(airlock_request_id))
        except CosmosResourceNotFoundError:
            raise EntityDoesNotExist
        return parse_obj_as(AirlockRequest, airlock_request)
async def update_airlock_request(
self,
original_request: AirlockRequest,
updated_by: User,
new_status: Optional[AirlockRequestStatus] = None,
request_files: Optional[List[AirlockFile]] = None,
status_message: Optional[str] = None,
airlock_review: Optional[AirlockReview] = None,
review_user_resource: Optional[AirlockReviewUserResource] = None) -> AirlockRequest:
updated_request = self._build_updated_request(
original_request=original_request,
new_status=new_status,
request_files=request_files,
status_message=status_message,
airlock_review=airlock_review,
review_user_resource=review_user_resource,
updated_by=updated_by)
try:
db_response = await self.update_airlock_request_item(original_request, updated_request, updated_by, {"previousStatus": original_request.status})
except CosmosAccessConditionFailedError:
logger.warning(f"ETag mismatch for request ID: '{original_request.id}'. Retrying.")
original_request = await self.get_airlock_request_by_id(original_request.id)
            updated_request = self._build_updated_request(original_request=original_request, new_status=new_status, request_files=request_files, status_message=status_message, airlock_review=airlock_review, review_user_resource=review_user_resource, updated_by=updated_by)
db_response = await self.update_airlock_request_item(original_request, updated_request, updated_by, {"previousStatus": original_request.status})
return db_response
def get_airlock_request_spec_params(self):
return self.get_resource_base_spec_params()
def create_airlock_review_item(self, airlock_review_input: AirlockReviewInCreate, reviewer: User) -> AirlockReview:
full_airlock_review_id = str(uuid.uuid4())
airlock_review_decision_from_bool = AirlockReviewDecision.Approved if airlock_review_input.approval else AirlockReviewDecision.Rejected
airlock_review = AirlockReview(
id=full_airlock_review_id,
dateCreated=self.get_timestamp(),
reviewDecision=airlock_review_decision_from_bool,
decisionExplanation=airlock_review_input.decisionExplanation,
reviewer=reviewer
)
return airlock_review
def _build_updated_request(
self,
original_request: AirlockRequest,
new_status: Optional[AirlockRequestStatus] = None,
request_files: Optional[List[AirlockFile]] = None,
            status_message: Optional[str] = None,
airlock_review: Optional[AirlockReview] = None,
review_user_resource: Optional[AirlockReviewUserResource] = None,
updated_by: Optional[User] = None) -> AirlockRequest:
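        # work on a deep copy so the caller's request is untouched if validation fails part-way through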
updated_request = copy.deepcopy(original_request)
if new_status is not None:
self._validate_status_update(current_status=original_request.status, new_status=new_status)
updated_request.status = new_status
if status_message is not None:
updated_request.statusMessage = status_message
if request_files is not None:
updated_request.files = request_files
if airlock_review is not None:
if updated_request.reviews is None:
updated_request.reviews = [airlock_review]
else:
updated_request.reviews.append(airlock_review)
if review_user_resource is not None and updated_by is not None:
updated_request.reviewUserResources[updated_by.id] = review_user_resource
return updated_request
def _validate_status_update(self, current_status, new_status):
if not self.validate_status_update(current_status=current_status, new_status=new_status):
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.AIRLOCK_REQUEST_ILLEGAL_STATUS_CHANGE)
|
AzureTRE/api_app/db/repositories/airlock_requests.py/0
|
{
"file_path": "AzureTRE/api_app/db/repositories/airlock_requests.py",
"repo_id": "AzureTRE",
"token_count": 4302
}
| 87 |
from pydantic import Field
from models.domain.resource import Resource, ResourceType
class WorkspaceService(Resource):
"""
Workspace service request
"""
workspaceId: str = Field("", title="Workspace ID", description="Service target Workspace id")
resourceType = ResourceType.WorkspaceService
|
AzureTRE/api_app/models/domain/workspace_service.py/0
|
{
"file_path": "AzureTRE/api_app/models/domain/workspace_service.py",
"repo_id": "AzureTRE",
"token_count": 86
}
| 88 |
from enum import Enum
from typing import List
from pydantic import BaseModel, Field
from models.domain.resource import ResourceType
from models.domain.workspace import Workspace, WorkspaceAuth
def get_sample_workspace(workspace_id: str, spec_workspace_id: str = "0001") -> dict:
return {
"id": workspace_id,
"templateName": "tre-workspace-base",
"templateVersion": "0.1.0",
"properties": {
"azure_location": "westeurope",
"workspace_id": spec_workspace_id,
"tre_id": "mytre-dev-1234",
"address_space_size": "small",
},
"resourceType": ResourceType.Workspace,
"workspaceURL": ""
}
class AuthProvider(str, Enum):
"""
Auth Provider
"""
AAD = "AAD"
class AuthenticationConfiguration(BaseModel):
provider: AuthProvider = Field(AuthProvider.AAD, title="Authentication Provider")
data: dict = Field({}, title="Authentication information")
class WorkspaceInResponse(BaseModel):
workspace: Workspace
class Config:
schema_extra = {
"example": {
"workspace": get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e")
}
}
class WorkspaceAuthInResponse(BaseModel):
workspaceAuth: WorkspaceAuth
class Config:
schema_extra = {
"example": {
"scopeId": "api://mytre-ws-1233456"
}
}
class WorkspacesInList(BaseModel):
workspaces: List[Workspace]
class Config:
schema_extra = {
"example": {
"workspaces": [
get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e", "0001"),
get_sample_workspace("2fdc9fba-726e-4db6-a1b8-9018a2165748", "0002"),
]
}
}
class WorkspaceInCreate(BaseModel):
templateName: str = Field(title="Workspace type", description="Bundle name")
properties: dict = Field({}, title="Workspace parameters", description="Values for the parameters required by the workspace resource specification")
class Config:
schema_extra = {
"example": {
"templateName": "tre-workspace-base",
"properties": {
"display_name": "the workspace display name",
"description": "workspace description",
"auth_type": "Manual",
"client_id": "<WORKSPACE_CLIENT_ID>",
"client_secret": "<WORKSPACE_CLIENT_SECRET>",
"address_space_size": "small"
}
}
}
|
AzureTRE/api_app/models/schemas/workspace.py/0
|
{
"file_path": "AzureTRE/api_app/models/schemas/workspace.py",
"repo_id": "AzureTRE",
"token_count": 1233
}
| 89 |
import json
from pathlib import Path
from typing import List, Dict, Tuple
def get_system_properties(id_field: str = "workspace_id"):
return {
"tre_id": {
"type": "string"
},
id_field: {
"type": "string"
},
"azure_location": {
"type": "string"
}
}
def merge_required(all_required):
    flattened_required = [prop for prop_list in all_required for prop in prop_list]
return list(set(flattened_required))
def merge_properties(all_properties: List[Dict]) -> Dict:
properties = {}
for prop in all_properties:
properties.update(prop)
return properties
def read_schema(schema_file: str) -> Tuple[List[str], Dict]:
workspace_schema_def = Path(__file__).parent / ".." / "schemas" / schema_file
with open(workspace_schema_def) as schema_f:
schema = json.load(schema_f)
return schema["required"], schema["properties"]
def enrich_template(original_template, extra_properties, is_update: bool = False, is_workspace_scope: bool = True) -> dict:
template = original_template.dict(exclude_none=True)
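    # merge the template's own required list and properties with the extra definitions supplied by the caller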
all_required = [definition[0] for definition in extra_properties] + [template["required"]]
all_properties = [definition[1] for definition in extra_properties] + [template["properties"]]
template["required"] = merge_required(all_required)
template["properties"] = merge_properties(all_properties)
# if this is an update, mark the non-updateable properties as readOnly
# this will help the UI render fields appropriately and know what it can send in a PATCH
if is_update:
for prop in template["properties"].values():
if not prop.get("updateable", False):
prop["readOnly"] = True
if "allOf" in template:
for conditional_property in template["allOf"]:
for condition in ["then", "else"]:
if condition in conditional_property and "properties" in conditional_property[condition]:
for prop in conditional_property[condition]["properties"].values():
if not prop.get("updateable", False):
prop["readOnly"] = True
# if there is an 'allOf' property which is empty, the validator fails - so remove the key
if "allOf" in template and template["allOf"] is None:
template.pop("allOf")
if is_workspace_scope:
id_field = "workspace_id"
else:
id_field = "shared_service_id"
template["system_properties"] = get_system_properties(id_field)
return template
def enrich_workspace_template(template, is_update: bool = False) -> dict:
"""Adds to the provided template all UI and system properties
Args:
template: [Template to which UI and system properties are added].
is_update: [Indicates that the schema is to be used in an update (PATCH) operation]
Returns:
[Dict]: [Enriched template with all required and system properties added]
"""
workspace_default_properties = read_schema('workspace.json')
azure_ad_properties = read_schema('azuread.json')
return enrich_template(template, [workspace_default_properties, azure_ad_properties], is_update=is_update)
def enrich_workspace_service_template(template, is_update: bool = False) -> dict:
"""Adds to the provided template all UI and system properties
Args:
template: [Template to which UI and system properties are added].
is_update: [Indicates that the schema is to be used in an update (PATCH) operation]
Returns:
[Dict]: [Enriched template with all required and system properties added]
"""
workspace_service_default_properties = read_schema('workspace_service.json')
return enrich_template(template, [workspace_service_default_properties], is_update=is_update)
def enrich_shared_service_template(template, is_update: bool = False) -> dict:
"""Adds to the provided template all UI and system properties
Args:
        template: [Template to which UI and system properties are added].
        is_update: [Indicates that the schema is to be used in an update (PATCH) operation]
Returns:
[Dict]: [Enriched template with all required and system properties added]
"""
shared_service_default_properties = read_schema('shared_service.json')
return enrich_template(template, [shared_service_default_properties], is_update=is_update, is_workspace_scope=False)
def enrich_user_resource_template(template, is_update: bool = False):
"""Adds to the provided template all UI and system properties
Args:
template: [Template to which UI and system properties are added].
is_update: [Indicates that the schema is to be used in an update (PATCH) operation]
Returns:
[Dict]: [Enriched template with all required and system properties added]
"""
user_resource_default_properties = read_schema('user_resource.json')
return enrich_template(template, [user_resource_default_properties], is_update=is_update)
|
AzureTRE/api_app/services/schema_service.py/0
|
{
"file_path": "AzureTRE/api_app/services/schema_service.py",
"repo_id": "AzureTRE",
"token_count": 1782
}
| 90 |
import datetime
from unittest.mock import AsyncMock
import uuid
import pytest
import pytest_asyncio
from mock import patch
import json
from fastapi import HTTPException, status
from api.routes.resource_helpers import save_and_deploy_resource, send_uninstall_message, mask_sensitive_properties, enrich_resource_with_available_upgrades
from db.repositories.resources_history import ResourceHistoryRepository
from tests_ma.test_api.conftest import create_test_user
from resources import strings
from db.repositories.resources import ResourceRepository
from db.repositories.operations import OperationRepository
from models.domain.operation import Status, Operation, OperationStep
from models.domain.resource import AvailableUpgrade, RequestAction, ResourceType
from models.domain.workspace import Workspace
WORKSPACE_ID = '933ad738-7265-4b5f-9eae-a1a62928772e'
FAKE_CREATE_TIME = datetime.datetime(2021, 1, 1, 17, 5, 55)
FAKE_CREATE_TIMESTAMP: float = FAKE_CREATE_TIME.timestamp()
FAKE_UPDATE_TIME = datetime.datetime(2022, 1, 1, 17, 5, 55)
FAKE_UPDATE_TIMESTAMP: float = FAKE_UPDATE_TIME.timestamp()
@pytest_asyncio.fixture
async def resource_repo() -> ResourceRepository:
with patch('api.dependencies.database.Database.get_container_proxy', return_value=AsyncMock()):
resource_repo_mock = await ResourceRepository().create()
yield resource_repo_mock
@pytest_asyncio.fixture
async def operations_repo() -> OperationRepository:
operation_repo_mock = await OperationRepository().create()
yield operation_repo_mock
@pytest_asyncio.fixture
async def resource_history_repo() -> ResourceHistoryRepository:
resource_history_repo_mock = await ResourceHistoryRepository().create()
yield resource_history_repo_mock
def sample_resource(workspace_id=WORKSPACE_ID):
return Workspace(
id=workspace_id,
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"client_id": "12345"
},
resourcePath=f'/workspaces/{workspace_id}',
user=create_test_user(),
updatedWhen=FAKE_CREATE_TIMESTAMP
)
def sample_resource_with_secret():
return Workspace(
id=WORKSPACE_ID,
templateName="tre-workspace-base",
templateVersion="0.1.0",
etag="",
properties={
"client_id": "12345",
"secret": "iamsecret",
"prop_with_nested_secret": {
"nested_secret": "iamanestedsecret"
}
},
resourcePath=f'/workspaces/{WORKSPACE_ID}',
user=create_test_user(),
updatedWhen=FAKE_CREATE_TIMESTAMP
)
def sample_resource_operation(resource_id: str, operation_id: str):
operation = Operation(
id=operation_id,
resourceId=resource_id,
resourcePath=f'/workspaces/{resource_id}',
resourceVersion=0,
action="install",
message="test",
        status=Status.Deployed,
createdWhen=FAKE_CREATE_TIMESTAMP,
updatedWhen=FAKE_CREATE_TIMESTAMP,
user=create_test_user(),
steps=[
OperationStep(
id="random-uuid-1",
templateStepId="main",
stepTitle="Main step for resource-id",
resourceAction="install",
resourceType=ResourceType.Workspace,
resourceTemplateName="template1",
resourceId=resource_id,
updatedWhen=FAKE_CREATE_TIMESTAMP,
sourceTemplateResourceId=resource_id
)
]
)
return operation
class TestResourceHelpers:
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message")
@pytest.mark.asyncio
async def test_save_and_deploy_resource_saves_item(self, _, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource()
operation = sample_resource_operation(resource_id=resource.id, operation_id=str(uuid.uuid4()))
resource_repo.save_item = AsyncMock(return_value=None)
operations_repo.create_operation_item = AsyncMock(return_value=operation)
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=create_test_user(),
resource_template=basic_resource_template)
resource_repo.save_item.assert_called_once_with(resource)
@patch("api.routes.workspaces.ResourceTemplateRepository")
@pytest.mark.asyncio
async def test_save_and_deploy_resource_raises_503_if_save_to_db_fails(self, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource()
resource_repo.save_item = AsyncMock(side_effect=Exception)
with pytest.raises(HTTPException) as ex:
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=create_test_user(),
resource_template=basic_resource_template)
assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message", return_value=None)
@pytest.mark.asyncio
async def test_save_and_deploy_resource_sends_resource_request_message(self, send_resource_request_mock, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource()
operation = sample_resource_operation(resource_id=resource.id, operation_id=str(uuid.uuid4()))
resource_repo.save_item = AsyncMock(return_value=None)
        operations_repo.create_operation_item = AsyncMock(return_value=operation)
user = create_test_user()
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
            user=user,
resource_template=basic_resource_template)
send_resource_request_mock.assert_called_once_with(
resource=resource,
operations_repo=operations_repo,
resource_repo=resource_repo,
user=user,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
action=RequestAction.Install)
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message", side_effect=Exception)
@pytest.mark.asyncio
async def test_save_and_deploy_resource_raises_503_if_send_request_fails(self, _, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource()
resource_repo.save_item = AsyncMock(return_value=None)
resource_repo.delete_item = AsyncMock(return_value=None)
with pytest.raises(HTTPException) as ex:
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=create_test_user(),
resource_template=basic_resource_template)
assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message", side_effect=Exception)
@pytest.mark.asyncio
async def test_save_and_deploy_resource_deletes_item_from_db_if_send_request_fails(self, _, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource()
resource_repo.save_item = AsyncMock(return_value=None)
resource_repo.delete_item = AsyncMock(return_value=None)
operations_repo.create_operation_item = AsyncMock(return_value=None)
with pytest.raises(HTTPException):
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=create_test_user(),
resource_template=basic_resource_template)
resource_repo.delete_item.assert_called_once_with(resource.id)
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message", return_value=None)
@patch("api.routes.workspaces.OperationRepository")
@pytest.mark.asyncio
async def test_send_uninstall_message_sends_uninstall_message(self, operations_repo, send_request_mock, resource_template_repo, resource_repo, resource_history_repo):
resource = sample_resource()
user = create_test_user()
await send_uninstall_message(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_type=ResourceType.Workspace,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=user)
send_request_mock.assert_called_once_with(
resource=resource,
operations_repo=operations_repo,
resource_repo=resource_repo,
user=user,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
action=RequestAction.UnInstall,
is_cascade=False)
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("api.routes.resource_helpers.send_resource_request_message", side_effect=Exception)
@patch("api.routes.workspaces.OperationRepository")
@pytest.mark.asyncio
async def test_send_uninstall_message_raises_503_on_service_bus_exception(self, operations_repo, _, resource_template_repo, resource_repo, basic_resource_template, resource_history_repo):
with pytest.raises(HTTPException) as ex:
await send_uninstall_message(
resource=sample_resource(),
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_type=ResourceType.Workspace,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=create_test_user())
assert ex.value.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
@patch("api.routes.workspaces.ResourceTemplateRepository")
@patch("service_bus.resource_request_sender.send_deployment_message")
@pytest.mark.asyncio
async def test_save_and_deploy_masks_secrets(self, send_deployment_message_mock, resource_template_repo, resource_repo, operations_repo, basic_resource_template, resource_history_repo):
resource = sample_resource_with_secret()
step_id = "random-uuid-1"
operation_id = str(uuid.uuid4())
operation = sample_resource_operation(resource_id=resource.id, operation_id=operation_id)
resource_repo.save_item = AsyncMock(return_value=None)
resource_repo.get_resource_by_id = AsyncMock(return_value=resource)
operations_repo.create_operation_item = AsyncMock(return_value=operation)
resource_template_repo.get_template_by_name_and_version = AsyncMock(return_value=basic_resource_template)
user = create_test_user()
await save_and_deploy_resource(
resource=resource,
resource_repo=resource_repo,
operations_repo=operations_repo,
resource_template_repo=resource_template_repo,
resource_history_repo=resource_history_repo,
user=user,
resource_template=basic_resource_template)
# Checking that the resource sent to ServiceBus was the same as the one created
send_deployment_message_mock.assert_called_once_with(
content=json.dumps(resource.get_resource_request_message_payload(operation_id=operation_id, step_id=step_id, action="install")),
correlation_id=operation_id,
session_id=resource.id,
action="install")
# Checking that the item saved had a secret redacted
resource.properties["secret"] = strings.REDACTED_SENSITIVE_VALUE
resource.properties["prop_with_nested_secret"]["nested_secret"] = strings.REDACTED_SENSITIVE_VALUE
resource_repo.save_item.assert_called_once_with(resource)
@patch("api.routes.workspaces.ResourceTemplateRepository")
@pytest.mark.asyncio
async def test_enrich_resource_with_available_upgrades_when_there_are_new_upgrades_returns_relevant_upgrades_only(self, resource_template_repo):
resource_template_repo.get_all_template_versions = AsyncMock(return_value=['0.1.0', '0.1.2', '1.0.0', '1.0.1'])
resource = sample_resource()
await enrich_resource_with_available_upgrades(resource, resource_template_repo)
assert resource.availableUpgrades == [AvailableUpgrade(version='0.1.2', forceUpdateRequired=False),
AvailableUpgrade(version='1.0.0', forceUpdateRequired=True),
AvailableUpgrade(version='1.0.1', forceUpdateRequired=True)]
@patch("api.routes.workspaces.ResourceTemplateRepository")
@pytest.mark.asyncio
async def test_enrich_resource_with_available_upgrades_when_there_are_no_upgrades_returns_empty_list(self, resource_template_repo):
resource_template_repo.get_all_template_versions = AsyncMock(return_value=['0.1.0'])
resource = sample_resource()
await enrich_resource_with_available_upgrades(resource, resource_template_repo)
assert resource.availableUpgrades == []
def test_sensitive_properties_get_masked(self, basic_resource_template):
resource = sample_resource_with_secret()
properties = resource.properties
masked_resource = mask_sensitive_properties(properties, basic_resource_template)
assert masked_resource["client_id"] == "12345"
assert masked_resource["secret"] == strings.REDACTED_SENSITIVE_VALUE
assert masked_resource["prop_with_nested_secret"]["nested_secret"] == strings.REDACTED_SENSITIVE_VALUE
|
AzureTRE/api_app/tests_ma/test_api/test_routes/test_resource_helpers.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_api/test_routes/test_resource_helpers.py",
"repo_id": "AzureTRE",
"token_count": 6314
}
| 91 |
from unittest.mock import AsyncMock
from mock import patch
import pytest
import pytest_asyncio
from db.repositories.resources_history import ResourceHistoryRepository
from models.domain.resource import Resource, ResourceHistoryItem, ResourceType
from tests_ma.test_api.test_routes.test_resource_helpers import FAKE_CREATE_TIMESTAMP
from tests_ma.test_api.conftest import create_test_user
HISTORY_ID = "59676d53-5356-45b1-981a-180c0b089839"
RESOURCE_ID = "178c1ffe-de57-495b-b1eb-9bc37d3c5087"
USER_ID = "e5accc9a-3961-4da9-b5ee-1bc8a406388b"
RESOURCE_VERSION = 1
@pytest_asyncio.fixture
async def resource_history_repo():
with patch('api.dependencies.database.Database.get_container_proxy', return_value=None):
resource_history_repo = await ResourceHistoryRepository().create()
yield resource_history_repo
@pytest.fixture
def sample_resource() -> Resource:
return Resource(
id=RESOURCE_ID,
isEnabled=True,
resourcePath="/resource/path",
templateName="template_name",
templateVersion="template_version",
properties={
'display_name': 'initial display name',
'description': 'initial description',
'computed_prop': 'computed_val'
},
resourceType=ResourceType.Workspace,
etag="some-etag-value",
resourceVersion=RESOURCE_VERSION,
updatedWhen=FAKE_CREATE_TIMESTAMP,
user=create_test_user()
)
@pytest.fixture
def sample_resource_history() -> ResourceHistoryItem:
return ResourceHistoryItem(
id=HISTORY_ID,
resourceId=RESOURCE_ID,
isEnabled=True,
resourceVersion=RESOURCE_VERSION,
templateVersion="template_version",
properties={
'display_name': 'initial display name',
'description': 'initial description',
'computed_prop': 'computed_val'
},
updatedWhen=FAKE_CREATE_TIMESTAMP,
user=create_test_user()
)
@pytest.mark.asyncio
@patch('db.repositories.resources_history.ResourceHistoryRepository.save_item', return_value=AsyncMock())
async def test_create_resource_history_item(mock_save, resource_history_repo, sample_resource):
resource_history = await resource_history_repo.create_resource_history_item(sample_resource)
# Assertions
assert isinstance(resource_history, ResourceHistoryItem)
mock_save.assert_called_once_with(resource_history)
assert resource_history.id is not None
assert resource_history.resourceId == sample_resource.id
assert resource_history.isEnabled is True
assert resource_history.properties == sample_resource.properties
assert resource_history.resourceVersion == sample_resource.resourceVersion
assert resource_history.updatedWhen == sample_resource.updatedWhen
assert resource_history.user == sample_resource.user
assert resource_history.templateVersion == sample_resource.templateVersion
@pytest.mark.asyncio
@patch('db.repositories.resources_history.ResourceHistoryRepository.save_item', side_effect=Exception)
async def test_create_resource_history_item_throws_error_when_saving(mock_save, resource_history_repo, sample_resource):
    with pytest.raises(Exception):
        await resource_history_repo.create_resource_history_item(sample_resource)
    # the save was attempted before the exception propagated
    assert mock_save.called
@pytest.mark.asyncio
@patch('db.repositories.resources_history.ResourceHistoryRepository.query')
async def test_get_resource_history_by_resource_id_if_found(mock_query, resource_history_repo, sample_resource_history):
mock_query.return_value = [sample_resource_history]
result = await resource_history_repo.get_resource_history_by_resource_id(RESOURCE_ID)
assert result == mock_query.return_value
@pytest.mark.asyncio
@patch('db.repositories.resources_history.ResourceHistoryRepository.query')
async def test_get_resource_history_by_resource_id_if_not_found(mock_query, resource_history_repo):
mock_query.return_value = []
result = await resource_history_repo.get_resource_history_by_resource_id(RESOURCE_ID)
assert result == mock_query.return_value
|
AzureTRE/api_app/tests_ma/test_db/test_repositories/test_resource_history_repository.py/0
|
{
"file_path": "AzureTRE/api_app/tests_ma/test_db/test_repositories/test_resource_history_repository.py",
"repo_id": "AzureTRE",
"token_count": 1650
}
| 92 |
[flake8]
ignore = E501,W503
|
AzureTRE/cli/.flake8/0
|
{
"file_path": "AzureTRE/cli/.flake8",
"repo_id": "AzureTRE",
"token_count": 13
}
| 93 |
import sys
import click
import logging
from tre.api_client import ApiClient
from tre.output import output, output_option, query_option
@click.command(name="migrations", help="Run migrations")
@output_option()
@query_option()
def migrations(output_format, query) -> None:
log = logging.getLogger(__name__)
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'POST', '/api/migrations')
output(
response,
output_format=output_format,
query=query,
default_table_query="migrations")
if not response.is_success:
sys.exit(1)
|
AzureTRE/cli/tre/commands/migrations.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/migrations.py",
"repo_id": "AzureTRE",
"token_count": 229
}
| 94 |
import logging
import click
from tre.api_client import ApiClient
from tre.output import output, output_option, query_option
from .contexts import UserResourceTemplateContext, pass_user_resource_template_context
def template_name_completion(ctx: click.Context, param: click.Parameter, incomplete: str):
log = logging.getLogger(__name__)
parent_ctx = ctx.parent
workspace_service_name = parent_ctx.params["template_name"]
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', f'/api/workspace-service-templates/{workspace_service_name}/user-resource-templates')
if response.is_success:
names = [template["name"] for template in response.json()["templates"]]
return [name for name in names if name.startswith(incomplete)]
@click.group(name="user-resource-template", invoke_without_command=True, help="Perform actions on an user-resource-template")
@click.argument('template_name', required=True, shell_complete=template_name_completion)
@click.pass_context
def user_resource_template(ctx: click.Context, template_name) -> None:
ctx.obj = UserResourceTemplateContext.add_template_name_to_context_obj(ctx, template_name)
@click.command(name="show", help="Show template")
@output_option()
@query_option()
@pass_user_resource_template_context
def user_resource_template_show(user_resource_template_context: UserResourceTemplateContext, output_format, query) -> None:
log = logging.getLogger(__name__)
workspace_service_name = user_resource_template_context.workspace_service_name
if workspace_service_name is None:
raise click.UsageError('Missing workspace service name')
template_name = user_resource_template_context.template_name
if template_name is None:
raise click.UsageError('Missing template name')
client = ApiClient.get_api_client_from_config()
response = client.call_api(
log,
'GET',
f'/api/workspace-service-templates/{workspace_service_name}/user-resource-templates/{template_name}',
)
output(response, output_format=output_format, query=query, default_table_query=r"{id: id, name:name, title: title, version:version, description:description}")
user_resource_template.add_command(user_resource_template_show)
|
AzureTRE/cli/tre/commands/workspace_service_templates/user_resource_templates/user_resource_template.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/workspace_service_templates/user_resource_templates/user_resource_template.py",
"repo_id": "AzureTRE",
"token_count": 737
}
| 95 |
import json
import sys
import click
import logging
from tre.api_client import ApiClient
from tre.commands.operation import default_operation_table_query_single, operation_show
from tre.output import output, output_option, query_option
from .contexts import pass_workspace_context, WorkspaceContext
from .operation import workspace_operation
from .operations import workspace_operations
from .workspace_services.workspace_service import workspace_service
from .workspace_services.workspace_services import workspace_services
from .airlock.requests import airlocks
from .airlock.request import airlock
def workspace_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str):
log = logging.getLogger(__name__)
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', '/api/workspaces')
if response.is_success:
ids = [workspace["id"] for workspace in response.json()["workspaces"]]
return [id for id in ids if id.startswith(incomplete)]
@click.group(invoke_without_command=True, help="Perform actions on an individual workspace")
@click.argument('workspace_id', envvar='TRECLI_WORKSPACE_ID', type=click.UUID, required=True, shell_complete=workspace_id_completion)
@click.pass_context
def workspace(ctx: click.Context, workspace_id: str) -> None:
ctx.obj = WorkspaceContext(workspace_id)
@click.command(name="show", help="Show a workspace")
@output_option()
@query_option()
@pass_workspace_context
def workspace_show(workspace_context: WorkspaceContext, output_format, query):
log = logging.getLogger(__name__)
workspace_id = workspace_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
client = ApiClient.get_api_client_from_config()
response = client.call_api(log, 'GET', f'/api/workspaces/{workspace_id}', )
output(
response,
output_format=output_format,
query=query,
default_table_query=r"workspace.{id:id, display_name:properties.display_name, deployment_status:deploymentStatus, workspace_url:workspaceURL}")
return response.text
@click.command(name="update", help="Update a workspace")
@click.option('--etag',
help='The etag of the workspace to update',
required=True)
@click.option('--definition', help='JSON definition for the workspace', required=False)
@click.option('--definition-file', help='File containing JSON definition for the workspace', required=False, type=click.File("r"))
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@click.pass_context
@pass_workspace_context
def workspace_update(workspace_context: WorkspaceContext, ctx: click.Context, etag, definition, definition_file, no_wait, output_format, query, suppress_output: bool = False):
log = logging.getLogger(__name__)
workspace_id = workspace_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
if definition is None:
if definition_file is None:
raise click.UsageError('Please specify either a definition or a definition file')
definition = definition_file.read()
definition_dict = json.loads(definition)
client = ApiClient.get_api_client_from_config()
response = client.call_api(
log,
'PATCH',
f'/api/workspaces/{workspace_id}',
headers={'etag': etag},
json_data=definition_dict)
if no_wait:
output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single())
if not response.is_success:
sys.exit(1)
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output)
@click.command(name="set-enabled", help="Enable/disable a workspace")
@click.option('--etag',
help='The etag of the workspace to update',
required=True)
@click.option('--enable/--disable', is_flag=True, required=True)
@click.option('--no-wait',
flag_value=True,
default=False)
@output_option()
@query_option()
@pass_workspace_context
def workspace_set_enabled(workspace_context: WorkspaceContext, etag, enable, no_wait, output_format, query, suppress_output: bool = False):
log = logging.getLogger(__name__)
workspace_id = workspace_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
client = ApiClient.get_api_client_from_config()
click.echo(f"Setting isEnabled to {enable}...", err=True)
response = client.call_api(
log,
'PATCH',
f'/api/workspaces/{workspace_id}',
headers={'etag': etag},
json_data={'isEnabled': enable})
if no_wait:
if not suppress_output or not response.is_success:
output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single())
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait=False, output_format=output_format, query=query, suppress_output=suppress_output)
@click.command(name="delete", help="Delete a workspace")
@click.option('--yes', is_flag=True, default=False)
@click.option('--no-wait',
flag_value=True,
default=False)
@click.option('--ensure-disabled',
help="Ensure disabled before deleting (resources are required to be disabled before deleting)",
flag_value=True,
default=False)
@output_option()
@query_option()
@click.pass_context
@pass_workspace_context
def workspace_delete(workspace_context: WorkspaceContext, ctx: click.Context, yes, no_wait, ensure_disabled, output_format, query):
log = logging.getLogger(__name__)
workspace_id = workspace_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
if not yes:
click.confirm("Are you sure you want to delete this workspace?", err=True, abort=True)
client = ApiClient.get_api_client_from_config()
if ensure_disabled:
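        # look up the workspace so we can check its isEnabled flag and grab the etag needed to disable it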
response = client.call_api(log, 'GET', f'/api/workspaces/{workspace_id}')
workspace_json = response.json()
if workspace_json['workspace']['isEnabled']:
etag = workspace_json['workspace']['_etag']
ctx.invoke(
workspace_set_enabled,
etag=etag,
enable=False,
no_wait=False,
suppress_output=True
)
click.echo("Deleting workspace...", err=True)
response = client.call_api(log, 'DELETE', f'/api/workspaces/{workspace_id}')
if no_wait:
output(response, output_format=output_format, query=query, default_table_query=default_operation_table_query_single())
if not response.is_success:
sys.exit(1)
else:
operation_url = response.headers['location']
operation_show(log, operation_url, no_wait, output_format=output_format, query=query)
@click.command(name="invoke-action", help="Invoke an action on a workspace")
@click.argument("action-name", required=True)
@click.option("--no-wait", flag_value=True, default=False)
@output_option()
@query_option()
@pass_workspace_context
def workspace_service_invoke_action(
workspace_context: WorkspaceContext,
action_name,
no_wait,
output_format,
query,
):
log = logging.getLogger(__name__)
workspace_id = workspace_context.workspace_id
if workspace_id is None:
raise click.UsageError('Missing workspace ID')
client = ApiClient.get_api_client_from_config()
click.echo(f"Invoking action {action_name}...\n", err=True)
response = client.call_api(
log,
"POST",
f"/api/workspaces/{workspace_id}/invoke-action",
params={"action": action_name},
)
if no_wait:
output(response, output_format=output_format, query=query)
if not response.is_success:
sys.exit(1)
else:
operation_url = response.headers["location"]
operation_show(
log,
operation_url,
no_wait=False,
output_format=output_format,
query=query,
)
workspace.add_command(workspace_show)
workspace.add_command(workspace_update)
workspace.add_command(workspace_set_enabled)
workspace.add_command(workspace_delete)
workspace.add_command(workspace_operations)
workspace.add_command(workspace_operation)
workspace.add_command(workspace_services)
workspace.add_command(workspace_service)
workspace.add_command(airlock)
workspace.add_command(airlocks)
|
AzureTRE/cli/tre/commands/workspaces/workspace.py/0
|
{
"file_path": "AzureTRE/cli/tre/commands/workspaces/workspace.py",
"repo_id": "AzureTRE",
"token_count": 3326
}
| 96 |
---
# After you have replaced all the __CHANGE_ME__ values in this file, you need to
# run `make auth` to set up authentication.
tre_id: __CHANGE_ME__
location: __CHANGE_ME__
management:
mgmt_resource_group_name: __CHANGE_ME__
mgmt_storage_account_name: __CHANGE_ME__
terraform_state_container_name: tfstate
acr_name: __CHANGE_ME__
# Azure Resource Manager credentials used for CI/CD pipelines
arm_subscription_id: __CHANGE_ME__
# If you want to override the currently signed in credentials
# You would do this if running commands like `make terraform-install DIR=./templates/workspaces/base`
# arm_tenant_id: __CHANGE_ME__
# arm_client_id: __CHANGE_ME__
# arm_client_secret: __CHANGE_ME__
tre:
  # If your local machine/build agent cannot get the public IP
  # address from https://ipecho.net/plain, you can work around
  # this by setting this environment variable. The site may be
  # blocked, for example, when deploying TRE from an office
  # network. This value is the public-facing IP address of the
  # deploying machine.
# public_deployment_ip_address: __CHANGE_ME__
core_address_space: 10.1.0.0/22
tre_address_space: 10.0.0.0/12
core_app_service_plan_sku: P1v2
resource_processor_vmss_sku: Standard_B2s
enable_swagger: true
enable_airlock_malware_scanning: true
# TODO: move to RP default with https://github.com/microsoft/AzureTRE/issues/2948
workspace_app_service_plan_sku: P1v2
# The TRE Web UI is deployed by default.
# Uncomment the following to disable deployment of the Web UI.
# deploy_ui: false
  # If you want TRE_URL to point to your local TRE API instance or to another
  # cloud provider, uncomment and set this variable
# tre_url: __CHANGE_ME__
authentication:
aad_tenant_id: __CHANGE_ME__
# Setting AUTO_WORKSPACE_APP_REGISTRATION to false will:
# create an identity with `Application.ReadWrite.OwnedBy`.
# Setting AUTO_WORKSPACE_APP_REGISTRATION to true will:
# create an identity with `Application.ReadWrite.All` and `Directory.Read.All`.
# When this is true, create Workspaces will also create an AAD Application automatically.
# When this is false, the AAD Application will need creating manually.
auto_workspace_app_registration: true
# Setting AUTO_WORKSPACE_GROUP_CREATION to true will create an identity with `Group.ReadWrite.All`
auto_workspace_group_creation: false
resource_processor:
# The number of processes to start in the resource processor VMSS image
resource_processor_number_processes_per_instance: 5
# This setting provides a way to pass environment values to the resource processor
# to use as a source of bundle parameter values
# For example, to specify your image_gallery_id for use in VM user resources with custom VM images:
# yamllint disable-line rule:line-length
# rp_bundle_values: '{"custom_key_1":"custom_value_1","image_gallery_id":"/subscriptions/<subscription-id>/resourceGroups/<your-rg>/providers/Microsoft.Compute/galleries/<your-gallery-name>"}'
developer_settings:
# Locks will not be added to stateful resources so they can be easily removed
# stateful_resources_locked: false
# This setting will enable your local machine to be able to
# communicate with Service Bus and Cosmos. It will also allow deploying
# the base workspace.
# enable_local_debugging: true
# Used by the API and Resource processor application to change log level
# Can be "ERROR", "WARNING", "INFO", "DEBUG"
# logging_level: "INFO"
|
AzureTRE/config.sample.yaml/0
|
{
"file_path": "AzureTRE/config.sample.yaml",
"repo_id": "AzureTRE",
"token_count": 1055
}
| 97 |
data "azurerm_client_config" "deployer" {}
data "azurerm_monitor_diagnostic_categories" "agw" {
resource_id = azurerm_application_gateway.agw.id
depends_on = [
azurerm_application_gateway.agw
]
}
|
AzureTRE/core/terraform/appgateway/data.tf/0
|
{
"file_path": "AzureTRE/core/terraform/appgateway/data.tf",
"repo_id": "AzureTRE",
"token_count": 84
}
| 98 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
# This is where we can migrate any Terraform before we plan and apply
# For instance deprecated Terraform resources
# shellcheck disable=SC1091
source ./migrate.sh
PLAN_FILE="tfplan$$"
TS=$(date +"%s")
LOG_FILE="${TS}-tre-core.log"
# These variables are loaded in for us
# shellcheck disable=SC2154
../../devops/scripts/terraform_wrapper.sh \
-g "${TF_VAR_mgmt_resource_group_name}" \
-s "${TF_VAR_mgmt_storage_account_name}" \
-n "${TF_VAR_terraform_state_container_name}" \
-k "${TRE_ID}" \
-l "${LOG_FILE}" \
-c "terraform plan -out ${PLAN_FILE} && \
terraform apply -input=false -auto-approve ${PLAN_FILE} && \
terraform output -json > ../tre_output.json"
./update_tags.sh
|
AzureTRE/core/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/core/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 300
}
| 99 |
variable "tre_id" {
type = string
}
variable "location" {
type = string
}
variable "resource_group_name" {
type = string
}
variable "core_address_space" {
type = string
}
variable "arm_environment" {
type = string
}
|
AzureTRE/core/terraform/network/variables.tf/0
|
{
"file_path": "AzureTRE/core/terraform/network/variables.tf",
"repo_id": "AzureTRE",
"token_count": 77
}
| 100 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
# Get the directory that this script is in
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo -e "\n\e[34m╔══════════════════════════════════════╗"
echo -e "║ \e[33mAzure TRE Makefile\e[34m ║"
echo -e "╚══════════════════════════════════════╝"
echo -e "\n\e[34m»»» ✅ \e[96mChecking pre-reqs\e[0m..."
echo -e "\n\e[96mChecking for Azure CLI\e[0m..."
if ! command -v az &> /dev/null; then
echo -e "\e[31m»»» ⚠️ Azure CLI is not installed! 😥 Please go to http://aka.ms/cli to set it up or rebuild your devcontainer"
exit 1
fi
if [[ "${1:-?}" == *"env"* ]]; then
if [ -z "${USE_ENV_VARS_NOT_FILES:-}" ]; then
# We only do this for local builds
echo -e "\n\e[96mLoading local environment variables\e[0m..."
if [ ! -f "config.yaml" ]; then
echo -e "\e[31m»»» ⚠️ Your config.yaml file has not been setup! 😥 Please create a config.yaml file."
exit 1
fi
# shellcheck disable=SC1091
. "$DIR/load_and_validate_env.sh"
fi
fi
if [[ "${1:-?}" != *"nodocker"* ]]; then
echo -e "\n\e[96mChecking for Docker\e[0m..."
if ! command -v docker &> /dev/null; then
echo -e "\e[31m»»» ⚠️ Docker is not installed! 😥 Please go to https://docs.docker.com/engine/install/ to set it up or rebuild your devcontainer"
exit 1
fi
fi
if [[ "${1:-?}" == *"certbot"* ]]; then
echo -e "\n\e[96mChecking for Certbot\e[0m..."
if ! /opt/certbot/bin/certbot --version > /dev/null 2>&1; then
echo -e "\e[31m»»» ⚠️ Certbot is not installed! 😥 Please go to https://certbot.eff.org/lets-encrypt/pip-other to set it up or rebuild your devcontainer"
exit 1
fi
fi
if [[ "${1:-?}" == *"porter"* ]]; then
echo -e "\n\e[96mChecking for porter\e[0m..."
if ! command -v porter &> /dev/null; then
echo -e "\e[31m»»» ⚠️ Porter is not installed! 😥 Please go to https://porter.sh/install/ to set it up or rebuild your devcontainer"
exit 1
fi
fi
# This is called if we are in a CI system and we will login
# with a Service Principal.
if [ -n "${TF_IN_AUTOMATION:-}" ]; then
az cloud set --name "$AZURE_ENVIRONMENT"
az login --service-principal -u "$ARM_CLIENT_ID" -p "$ARM_CLIENT_SECRET" --tenant "$ARM_TENANT_ID"
az account set -s "$ARM_SUBSCRIPTION_ID"
fi
SUB_NAME=$(az account show --query name -o tsv)
SUB_ID=$(az account show --query id -o tsv)
export SUB_ID
TENANT_ID=$(az account show --query tenantId -o tsv)
export TENANT_ID
if [ -z "$SUB_NAME" ]; then
echo -e "\n\e[31m»»» ⚠️ You are not logged in to Azure!"
exit 1
fi
echo -e "\e[34m»»» 🔨 \e[96mAzure details from logged on user \e[0m"
echo -e "\e[34m»»» • \e[96mSubscription: \e[33m$SUB_NAME\e[0m"
echo -e "\e[34m»»» • \e[96mTenant: \e[33m$TENANT_ID\e[0m\n"
# This shouldn't be here, but since other scripts don't use this option we must reset it.
# For tracking: https://github.com/microsoft/AzureTRE/issues/1672
set +o nounset
|
AzureTRE/devops/scripts/check_dependencies.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/check_dependencies.sh",
"repo_id": "AzureTRE",
"token_count": 1312
}
| 101 |
#!/bin/bash
# This script adds missing env vars that are needed to run porter commands locally.
# If a bundle defines a parameter that isn't in the environment, it will be added.
# When/if this issue is addressed, we could remove the script:
# https://github.com/getporter/porter/issues/2474
set -o errexit
set -o pipefail
# set -o xtrace
while read -r env_var_name; do
if [[ -z "${!env_var_name}" ]]; then
echo "${env_var_name} doesn't exist."
# shellcheck disable=SC2086
declare -g $env_var_name=
export "${env_var_name?}"
fi
done < <(jq -r '.parameters[].source.env' parameters.json)
|
AzureTRE/devops/scripts/porter_local_env.sh/0
|
{
"file_path": "AzureTRE/devops/scripts/porter_local_env.sh",
"repo_id": "AzureTRE",
"token_count": 214
}
| 102 |
{
"$schema": "https://json-schema.org/draft/2019-09/schema",
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Assignment id."
},
"userId": {
"type": "string",
"description": "User id.",
"format": "uuid"
},
"ResourceId": {
"type": "string",
"description": "Resource id.",
"format": "uuid"
},
"permissions": {
"type": "array",
"items": {
"type": "string",
"enum": [
"WorkspaceWrite",
"WorkspaceRead",
"WorkspaceDelete",
"WorkspaceCreateService",
"WorkspaceUserRead",
"WorkspaceUserManage",
"ServiceRead",
"ServiceWrite",
"ServiceDelete"
]
}
},
"etag": {
"type": "string",
"description": "ETag."
}
},
"required": [
"id",
"userId",
"resourceId",
"permissions",
"etag"
],
"additionalProperties": false
}
|
AzureTRE/docs/schemas/resource-assignment.json/0
|
{
"file_path": "AzureTRE/docs/schemas/resource-assignment.json",
"repo_id": "AzureTRE",
"token_count": 768
}
| 103 |
# AzureTRE Deployment repo
AzureTRE has an OSS deployment repository which you can find [here](https://github.com/microsoft/AzureTRE-Deployment).
It contains all the required tooling to develop your custom templates and deploy the Azure TRE:
- Github Actions implementing AzureTRE automation, including running deployments to Azure
- Configuration specific to deployment
- Directories set up for: workspace, workspace service and user resource template definitions
- Devcontainer setup
## Create your own copy of the Azure TRE deployment repo
To get started with AzureTRE, follow these steps:
!!! note
The following steps in this guide should be done using the deployment repo.
1. Go to [AzureTRE Deployment repository](https://github.com/microsoft/AzureTRE-Deployment)
1. Click on **Use this template** to set up your project from this template:
    ![Use this template](../../assets/using-tre/use_template.png)
1. Follow the steps in this [Github templates guide](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-repository-from-a-template) to set up the repo.
## Clone the Azure TRE Deployment git repository
!!! tip
    If using Windows, please clone the repository to a Linux file system, i.e. to `/xxx` rather than `c:\`, for example within Windows Subsystem for Linux. If you clone the repository to a Windows file system you will likely hit issues with file permissions as described in this issue: <https://github.com/microsoft/AzureTRE/issues/1395>
```cmd
git clone https://github.com/<your_username>/AzureTRE-Deployment.git
```
1. Open the cloned repository in Visual Studio Code and connect to the development container.
```cmd
code .
```
!!! tip
Visual Studio Code should recognize the available development container and ask you to open the folder using it. For additional details on connecting to remote containers, please see the [Open an existing folder in a container](https://code.visualstudio.com/docs/remote/containers#_quick-start-open-an-existing-folder-in-a-container) quickstart.
When you start the development container for the first time, the container will be built. This usually takes a few minutes. **Please use the development container for all further steps.**
## Next steps
* [AD Tenant Choices](./ad-tenant-choices.md)
|
AzureTRE/docs/tre-admins/setup-instructions/deployment-repo.md/0
|
{
"file_path": "AzureTRE/docs/tre-admins/setup-instructions/deployment-repo.md",
"repo_id": "AzureTRE",
"token_count": 627
}
| 104 |
# TRE CLI
**WARNING - this CLI is currently experimental**
This guide covers the AzureTRE CLI: installation, login, the general command structure, and the options that control its output and behaviour.
## Installation
It is recommended to use the CLI within the dev container, where it is installed automatically. To install it manually, run `make install-cli`.
## Shell completion
The `tre` CLI supports shell completion. To enable it, run `source <(_TRE_COMPLETE=bash_source tre)` (or add this to your shell profile).
Other shells are supported, see [the click docs](https://click.palletsprojects.com/en/8.1.x/shell-completion/#enabling-completion).
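For example, to enable completion for the current bash session and persist it (a minimal sketch; the `~/.bashrc` path is an assumption for bash, adjust for your shell):
```bash
# enable tre completion for the current session
source <(_TRE_COMPLETE=bash_source tre)
# persist it for future sessions (assumes bash with ~/.bashrc)
echo 'source <(_TRE_COMPLETE=bash_source tre)' >> ~/.bashrc
```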
## Login
The CLI allows you to log in using either a device code flow or client credentials flow.
### Device code flow (interactive)
To log in using device code flow, run:
```bash
tre login device-code --base-url https://mytre.westeurope.cloudapp.azure.com/
```
This will prompt you to copy a device code and navigate to <https://microsoft.com/devicelogin> to complete the login flow interactively.
You can specify `--no-verify` to disable SSL cert verification.
On versions of the API prior to `0.5.7`, you will need to pass some additional parameters:
```bash
tre login device-code \
--base-url https://mytre.westeurope.cloudapp.azure.com/ \
--client-id <API_CLIENT_ID> \
--aad-tenant-id <AAD_TENANT_ID> \
--api-scope <ROOT_API_SCOPE>
```
!!! info
The API scope is usually of the form `api://<API_CLIENT_ID>/user_impersonation`.
!!! info
When using device code flow, you need to ensure that the app registrations for the root API and any workspaces you access have device code flow enabled. (Automating this is tracked in [#2709](https://github.com/microsoft/AzureTRE/issues/2709).)
#### Workspace authentication
Since the API scope for each workspace is different, the token returned when authenticating against the root API isn't valid against a workspace.
When running interactively, the CLI will prompt you when it needs to reauthenticate for a workspace API.
You can pre-emptively get an authentication token for a workspace using the `--workspace` option. This can be specified multiple times to authenticate against multiple workspaces at once. You can also use `--all-workspaces` to get a token for all workspaces in one command.
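For example, a sketch of pre-authenticating at login time (the workspace IDs are placeholders, and this assumes the options combine with the login command as described above):
```bash
# Get tokens for two specific workspaces while logging in
tre login device-code \
  --base-url https://mytre.westeurope.cloudapp.azure.com/ \
  --workspace <WORKSPACE_ID_1> \
  --workspace <WORKSPACE_ID_2>

# Or authenticate against every workspace in one go
tre login device-code \
  --base-url https://mytre.westeurope.cloudapp.azure.com/ \
  --all-workspaces
```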
### Client credentials (service)
To log in using client credentials flow (for a service principal), run:
```bash
tre login client-credentials \
--base-url https://mytre.westeurope.cloudapp.azure.com/ \
    --client-id <SERVICE_PRINCIPAL_CLIENT_ID> \
--client-secret <SERVICE_PRINCIPAL_CLIENT_SECRET>
```
You can specify `--no-verify` to disable SSL cert verification.
On versions of the API prior to `0.5.7`, you will need to pass some additional parameters:
```bash
tre login client-credentials \
--base-url https://mytre.westeurope.cloudapp.azure.com/ \
    --client-id <SERVICE_PRINCIPAL_CLIENT_ID> \
--client-secret <SERVICE_PRINCIPAL_CLIENT_SECRET> \
--aad-tenant-id <AAD_TENANT_ID> \
--api-scope <ROOT_API_SCOPE>
```
!!! info
The API scope is usually of the form `api://<API_CLIENT_ID>/user_impersonation`.
## General command structure
The general command structure for the CLI is:
```bash
tre plural_noun cmd
# or
tre singular_noun id cmd
```
For example:
```bash
# list workspaces
tre workspaces list
# show an individual workspace
tre workspace 567f17d6-1abb-450f-991a-19398f89b3c2 show
```
This pattern is nested for sub-resources, e.g. operations for a workspace:
```bash
# list operations for a workspace
tre workspace 567f17d6-1abb-450f-991a-19398f89b3c2 operations list
# show an individual operation for a workspace
tre workspace 567f17d6-1abb-450f-991a-19398f89b3c2 operation 0f66839f-8727-43db-b2d6-6c7197712e36 show
```
## Asynchronous operations
Many operations in TRE are asynchronous, and the corresponding API endpoints return a `202 Accepted` response with a `Location` header pointing to an operation endpoint.
The commands corresponding to these asynchronous operations will poll this resulting operation and wait until it has completed. If you don't want this behaviour, you can pass the `--no-wait` option.
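For example, a sketch reusing the `set-enabled` command and the `$WORKSPACE_ID`/`$ETAG` variables introduced in the capture example later in this guide:
```bash
# Queue the operation and return immediately rather than polling until completion
tre workspace $WORKSPACE_ID set-enabled --etag $ETAG --enable --no-wait
```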
## Command output
### Output formats
Most commands support formatting output as `table` (default), `json`, `jsonc`, `raw`, or `none` via the `--output` option. This can also be controlled using the `TRECLI_OUTPUT` environment variable, e.g. set `TRECLI_OUTPUT` to `table` to default to the table output format (see the example below the table).
| Option | Description |
| ------- | ----------------------------------------------------------------------------- |
| `table` | Works well for interactive use |
| `json` | Plain JSON output, ideal for parsing via `jq` or other tools |
| `jsonc` | Coloured, formatted JSON |
| `raw` | Results are output as-is. Useful with `--query` when capturing a single value |
| `none` | No output |
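For example, to default to plain JSON for a scripting session:
```bash
export TRECLI_OUTPUT=json
tre workspaces list   # now outputs JSON without needing --output
```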
### Querying output
Most commands support [JMESPath](https://jmespath.org/) queries for the output via the `--query` option.
For example, to get a list of workspace IDs run `tre workspaces list --query workspaces[].id`.
This can be combined with `--output table`, e.g. `tre workspaces list -o table --query "workspaces[].{id:id, name: properties.display_name}"`. Note that the query result must be an object with named properties (or an array of such objects).
### Capturing results
Some of the commands in the CLI output progress information (e.g. `tre workspace new ...`).
When the CLI outputs progress information, it outputs it to stderr. The final result of the command is output to stdout.
This gives a good experience when chaining commands together, e.g.:
```bash
# Set the workspace to use
WORKSPACE_ID=567f17d6-1abb-450f-991a-19398f89b3c2
# Get the workspace etag
ETAG=$(tre workspace $WORKSPACE_ID show --query workspace._etag --output json)
# Disable the workspace (this is an asynchronous operation)
OPERATION=$(tre workspace $WORKSPACE_ID set-enabled --etag $ETAG --enable --output json)
# ^ this last command will output progress information while waiting for the operation to complete.
# And OPERATION contains the JSON describing the completed operation
# allowing you to query the status property etc
echo $OPERATION
```
## Passing definitions
When creating new resources (e.g. workspaces), you need to pass in a definition. This can be passed in various ways: as an inline value, from a file, or from stdin.
To pass a definition inline, use the `--definition` option and include the JSON content, e.g. `tre workspace new --definition '{"templateName":...}'`
To load a definition from a file, use the `--definition-file` option, e.g. `tre workspace new --definition-file my-workspace.json`
To pass a definition via stdin, use `--definition-file -` (note the `-` to signal reading from stdin).
Reading from stdin allows you to take some interesting approaches to specifying the definition.
For example, you can use HEREDOC syntax to describe the JSON payload over multiple lines:
```bash
cat << EOF | tre workspaces new --definition-file -
{
"templateName": "my-workspace",
"properties": {
"display_name": $DISPLAY_NAME,
...
}
}
EOF
```
Or you can load the content from a file that contains embedded environment variables and use `envsubst` to substitute them:
`cat my-workspace.json | envsubst | tre workspace new --definition-file -`
## Overriding the API URL
When you run `tre login` you specify the base URL for the API, but when you are developing AzureTRE you may want to make calls against the locally running API.
To support this, you can set the `TRECLI_BASE_URL` environment variable and that will override the API endpoint used by the CLI.
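For example (the port is illustrative and depends on how you run the API locally):
```bash
export TRECLI_BASE_URL=http://localhost:8000
tre workspaces list   # now targets the locally running API
```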
## Example usage
### Creating an import airlock request
```bash
# Set the ID of the workspace to create the import request for
WORKSPACE_ID=__ADD_ID_HERE__
# Create the airlock request - change the justification as appropriate
request=$(tre workspace $WORKSPACE_ID airlock-requests new --type import --title "Ant" --justification "It's import-ant" --output json)
request_id=$(echo $request | jq -r .airlockRequest.id)
# Get the storage upload URL
upload_url=$(tre workspace $WORKSPACE_ID airlock-request $request_id get-url --query containerUrl --output raw)
# Use the az CLI to upload ant.txt from the current directory (change as required)
az storage blob upload-batch --source . --pattern ant.txt --destination $upload_url
# Submit the request for review
tre workspace $WORKSPACE_ID airlock-request $request_id submit
```
|
AzureTRE/docs/tre-developers/CLI.md/0
|
{
"file_path": "AzureTRE/docs/tre-developers/CLI.md",
"repo_id": "AzureTRE",
"token_count": 2718
}
| 105 |
# Guacamole User Resource Service bundle (Windows)
This is a User Resource Service template. It defines a VM to be used by TRE Airlock Managers with [Guacamole server](https://guacamole.apache.org/).
It blocks all inbound traffic from the internet and allows only RDP connections from within the vnet.
It also blocks all outbound traffic except for traffic to the Airlock Export In-Review storage account within the workspace. For more information about Airlock, see the [overview page](../../azure-tre-overview/airlock.md).
Data that needs to be reviewed will be downloaded onto the VM during VM creation and will be available on the Desktop.
It can only be deployed by an Airlock Manager.
## Prerequisites
- [A base workspace bundle installed](../workspaces/base.md)
- [A guacamole workspace service bundle installed](../workspace-services/guacamole.md)
|
AzureTRE/docs/tre-templates/user-resources/export-reviewvm.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/user-resources/export-reviewvm.md",
"repo_id": "AzureTRE",
"token_count": 216
}
| 106 |
# Airlock Import Review workspace
The Airlock Import Review workspace is used as part of the Review workflow for [Airlock](../../azure-tre-overview/airlock.md).
It allows reviewing Airlock Data Import requests by providing a workspace in which to spin up VMs that can then access the in-progress storage account.
The workspace is built upon the base workspace template. It adds a private endpoint to connect to the Import In-Progress storage account, and disables shared storage for VMs.
|
AzureTRE/docs/tre-templates/workspaces/airlock-import-review.md/0
|
{
"file_path": "AzureTRE/docs/tre-templates/workspaces/airlock-import-review.md",
"repo_id": "AzureTRE",
"token_count": 114
}
| 107 |
# Local Development
This guide will cover how to set up a local development environment to add custom templates to AzureTRE and deploy AzureTRE from your local machine.
## Local Development Setup
### Prerequisites
To deploy an Azure TRE instance, the following assets and tools are required:
* [Azure subscription](https://azure.microsoft.com)
* [Microsoft Entra ID](https://learn.microsoft.com/en-gb/entra/fundamentals/whatis) tenant in which you can create application registrations
* A Git client such as [Git](https://git-scm.com/) or [GitHub Desktop](https://desktop.github.com/)
* [Docker Desktop](https://www.docker.com/products/docker-desktop)
### Development container
The AzureTRE Deployment solution contains a [development container](https://code.visualstudio.com/docs/remote/containers) with all the required tooling to develop and deploy the AzureTRE and your custom templates to it. To deploy and extend an AzureTRE instance using the provided development container you will also need:
* [Visual Studio Code](https://code.visualstudio.com)
* [Remote containers extension for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
The files for the dev container in the AzureTRE Deployment repo are located in the `/.devcontainer/` folder.
With the prerequisites and the development container in place, follow these steps to start local development:
1. Clone the project you have created from the AzureTRE Deployment template `git clone <your_project>`
1. Open it in Visual Studio Code
1. VS Code will recognize the devcontainer configuration and will ask to reopen the folder in the devcontainer:

After the devcontainer is built, you will see the AzureTRE folder, which you can use as a reference for your templates. In addition, the sample `.env` files will be added.
## Local Deployment
To run AzureTRE deploy locally:
1. Open your project in the VS Code devcontainer.
2. Fill in all the required configuration. Follow [this guide](https://github.com/microsoft/AzureTRE-Deployment#congiguration-setup) to set it up.
3. Run `make all`.
!!! tip
The Makefile in the AzureTRE deployment repository sources the make commands from AzureTRE that it references. This allows you to add your commands and also use the same make commands used in the AzureTRE.
With all the environment variables in place, `make all` will deploy AzureTRE from your local machine.
## How to Contribute to our Documentation
If you have any comments or suggestions about our documentation then you can visit our GitHub project and either raise a new issue, or comment on one of the existing ones.
You can find our existing documentation issues on GitHub by clicking on the link below:
[Existing Documentation Issues](https://github.com/microsoft/AzureTRE/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation)
Or, you can raise a new issue by clicking on this link:
[Report an Issue or Make a Suggestion](https://github.com/microsoft/AzureTRE/issues/new/choose)
**Thank you for your patience and support!**
|
AzureTRE/docs/using-tre/local-development/local-development.md/0
|
{
"file_path": "AzureTRE/docs/using-tre/local-development/local-development.md",
"repo_id": "AzureTRE",
"token_count": 790
}
| 108 |
[pytest]
markers =
smoke: marks tests as smoke (run sometimes, relatively fast)
extended: marks tests as extended (run less frequently, relatively slow)
extended_aad
shared_services
performance: marks tests for performance evaluation
timeout: used to set test timeout with pytest-timeout
airlock: only airlock related
workspace_services
asyncio_mode = auto
log_cli = 1
log_cli_level = INFO
log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
log_cli_date_format = %Y-%m-%d %H:%M:%S
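# Example: select tests by marker, e.g. "pytest -m smoke" or "pytest -m 'extended and not airlock'"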
|
AzureTRE/e2e_tests/pytest.ini/0
|
{
"file_path": "AzureTRE/e2e_tests/pytest.ini",
"repo_id": "AzureTRE",
"token_count": 194
}
| 109 |
import pytest
from e2e_tests.conftest import get_workspace_owner_token, disable_and_delete_ws_resource
from helpers import check_aad_auth_redirect
from resources.resource import get_resource, post_resource
from resources import strings
pytestmark = pytest.mark.asyncio
workspace_services = [
strings.AZUREML_SERVICE,
strings.GITEA_SERVICE,
strings.MLFLOW_SERVICE,
strings.MYSQL_SERVICE,
strings.HEALTH_SERVICE,
]
@pytest.mark.extended
@pytest.mark.timeout(75 * 60)
async def test_create_guacamole_service_into_base_workspace(setup_test_workspace_and_guacamole_service, verify) -> None:
_, workspace_id, workspace_service_path, _ = setup_test_workspace_and_guacamole_service
workspace_owner_token = await get_workspace_owner_token(workspace_id, verify)
await ping_guacamole_workspace_service(workspace_service_path, workspace_owner_token, verify)
user_resource_payload = {
"templateName": strings.GUACAMOLE_WINDOWS_USER_RESOURCE,
"properties": {
"display_name": "My VM",
"description": "Will be using this VM for my research",
"os_image": "Windows 10"
}
}
_, _ = await post_resource(user_resource_payload, f'/api{workspace_service_path}/{strings.API_USER_RESOURCES}', workspace_owner_token, verify, method="POST")
@pytest.mark.extended_aad
@pytest.mark.timeout(75 * 60)
async def test_create_guacamole_service_into_aad_workspace(setup_test_aad_workspace, verify) -> None:
"""This test will create a Guacamole service but will create a workspace and automatically register the AAD Application"""
workspace_path, workspace_id = setup_test_aad_workspace
workspace_owner_token = await get_workspace_owner_token(workspace_id, verify)
workspace_service_payload = {
"templateName": strings.GUACAMOLE_SERVICE,
"properties": {
"display_name": "Workspace service test",
"description": "Workspace service for E2E test"
}
}
workspace_service_path, _ = await post_resource(workspace_service_payload, f'/api{workspace_path}/{strings.API_WORKSPACE_SERVICES}', workspace_owner_token, verify)
await ping_guacamole_workspace_service(workspace_service_path, workspace_owner_token, verify)
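# Resolves the workspace service and checks that its Guacamole admin endpoint redirects unauthenticated requests to AAD.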
async def ping_guacamole_workspace_service(workspace_service_path, access_token, verify) -> None:
workspace_service = await get_resource(f"/api{workspace_service_path}", access_token, verify)
endpoint = workspace_service["workspaceService"]["properties"]["admin_connection_uri"]
await check_aad_auth_redirect(endpoint, verify)
@pytest.mark.workspace_services
@pytest.mark.timeout(45 * 60)
@pytest.mark.parametrize("template_name", workspace_services)
async def test_install_workspace_service(template_name, verify, setup_test_workspace) -> None:
workspace_path, workspace_id = setup_test_workspace
workspace_owner_token = await get_workspace_owner_token(workspace_id, verify)
service_payload = {
"templateName": template_name,
"properties": {
"display_name": f"{template_name} test",
"description": "Workspace service for E2E test"
}
}
workspace_service_path, _ = await post_resource(service_payload, f'/api{workspace_path}/{strings.API_WORKSPACE_SERVICES}', workspace_owner_token, verify)
await disable_and_delete_ws_resource(workspace_service_path, workspace_id, verify)
|
AzureTRE/e2e_tests/test_workspace_services.py/0
|
{
"file_path": "AzureTRE/e2e_tests/test_workspace_services.py",
"repo_id": "AzureTRE",
"token_count": 1254
}
| 110 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace
export PORTER_HOME=${PORTER_HOME:-~/.porter}
export PORTER_MIRROR=${PORTER_MIRROR:-https://cdn.porter.sh}
PORTER_VERSION=${PORTER_VERSION:-latest}
echo "Installing porter@$PORTER_VERSION to $PORTER_HOME from $PORTER_MIRROR"
mkdir -p "$PORTER_HOME/runtimes"
curl -fsSLo "$PORTER_HOME/porter" "$PORTER_MIRROR/$PORTER_VERSION/porter-linux-amd64"
chmod +x "$PORTER_HOME/porter"
ln -s "$PORTER_HOME/porter" "$PORTER_HOME/runtimes/porter-runtime"
echo "Installed $("${PORTER_HOME}"/porter version)"
"${PORTER_HOME}/porter" mixin install exec --version "$PORTER_VERSION"
"${PORTER_HOME}/porter" mixin install terraform --version "$PORTER_TERRAFORM_MIXIN_VERSION"
"${PORTER_HOME}/porter" mixin install az --version "$PORTER_AZ_MIXIN_VERSION"
"${PORTER_HOME}/porter" plugin install azure --version "$PORTER_AZURE_PLUGIN_VERSION"
echo "Installation complete."
|
AzureTRE/resource_processor/scripts/porter-v1.sh/0
|
{
"file_path": "AzureTRE/resource_processor/scripts/porter-v1.sh",
"repo_id": "AzureTRE",
"token_count": 374
}
| 111 |
TRE_ID="__CHANGE_ME__"
TRE_URL="__CHANGE_ME__"
SMTP_SERVER_ADDRESS="__CHANGE_ME__"
SMTP_USERNAME="__CHANGE_ME__"
SMTP_PASSWORD="__CHANGE_ME__"
SMTP_FROM_EMAIL="__CHANGE_ME__"
TRE_RESOURCE_ID="__CHANGE_ME__"
SMTP_SERVER_PORT="25"
|
AzureTRE/templates/shared_services/airlock_notifier/.env.sample/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/.env.sample",
"repo_id": "AzureTRE",
"token_count": 117
}
| 112 |
# Azure Provider source and version being used
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.57.0"
}
local = {
source = "hashicorp/local"
version = "=2.4.0"
}
}
backend "azurerm" {}
}
provider "azurerm" {
features {}
}
|
AzureTRE/templates/shared_services/airlock_notifier/terraform/providers.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/airlock_notifier/terraform/providers.tf",
"repo_id": "AzureTRE",
"token_count": 143
}
| 113 |
resource "azurerm_storage_account" "cyclecloud" {
name = local.storage_name
location = data.azurerm_resource_group.rg.location
resource_group_name = data.azurerm_resource_group.rg.name
account_tier = "Standard"
account_replication_type = "GRS"
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags] }
}
data "azurerm_private_dns_zone" "blobcore" {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.blob.core.windows.net"]
resource_group_name = local.core_resource_group_name
}
resource "azurerm_private_endpoint" "stgblobpe" {
name = "pe-${local.storage_name}"
location = data.azurerm_resource_group.rg.location
resource_group_name = data.azurerm_resource_group.rg.name
subnet_id = data.azurerm_subnet.shared.id
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "private-dns-zone-group"
private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
}
private_service_connection {
name = "pesc-${local.storage_name}"
private_connection_resource_id = azurerm_storage_account.cyclecloud.id
is_manual_connection = false
subresource_names = ["Blob"]
}
}
|
AzureTRE/templates/shared_services/cyclecloud/terraform/storage.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/cyclecloud/terraform/storage.tf",
"repo_id": "AzureTRE",
"token_count": 652
}
| 114 |
variable "tre_id" {
type = string
description = "Unique TRE ID"
}
variable "tre_resource_id" {
type = string
description = "Unique TRE Resource ID"
}
variable "arm_environment" {
type = string
}
|
AzureTRE/templates/shared_services/databricks-auth/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/databricks-auth/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 82
}
| 115 |
variable "tre_id" {
type = string
description = "Unique TRE ID"
}
variable "microsoft_graph_fqdn" {
type = string
description = "Microsoft Graph FQDN"
}
variable "tre_resource_id" {
type = string
description = "Resource ID"
}
variable "api_driven_rule_collections_b64" {
type = string
default = "W10=" #b64 for []
}
variable "api_driven_network_rule_collections_b64" {
type = string
default = "W10=" #b64 for []
}
variable "sku_tier" {
type = string
default = "Standard"
}
|
AzureTRE/templates/shared_services/firewall/terraform/variables.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/firewall/terraform/variables.tf",
"repo_id": "AzureTRE",
"token_count": 211
}
| 116 |
resource "random_password" "password" {
length = 20
min_upper = 2
min_lower = 2
min_numeric = 2
min_special = 2
}
resource "azurerm_mysql_flexible_server" "gitea" {
name = "mysql-${var.tre_id}"
resource_group_name = local.core_resource_group_name
location = data.azurerm_resource_group.rg.location
administrator_login = "mysqladmin"
administrator_password = random_password.password.result
sku_name = local.sql_sku[var.sql_sku].value
version = "8.0.21"
backup_retention_days = 7
geo_redundant_backup_enabled = false
tags = local.tre_shared_service_tags
lifecycle { ignore_changes = [tags, zone] }
}
resource "azurerm_mysql_flexible_database" "gitea" {
name = "gitea"
resource_group_name = local.core_resource_group_name
server_name = azurerm_mysql_flexible_server.gitea.name
charset = "utf8"
collation = "utf8_unicode_ci"
}
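# State move: the private endpoint was renamed from private-endpoint to private_endpoint; this keeps its existing state.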
moved {
from = azurerm_private_endpoint.private-endpoint
to = azurerm_private_endpoint.private_endpoint
}
resource "azurerm_private_endpoint" "private_endpoint" {
name = "pe-${azurerm_mysql_flexible_server.gitea.name}"
location = data.azurerm_resource_group.rg.location
resource_group_name = local.core_resource_group_name
subnet_id = data.azurerm_subnet.shared.id
tags = local.tre_shared_service_tags
private_service_connection {
private_connection_resource_id = azurerm_mysql_flexible_server.gitea.id
name = "psc-${azurerm_mysql_flexible_server.gitea.name}"
subresource_names = ["mysqlServer"]
is_manual_connection = false
}
private_dns_zone_group {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.mysql.database.azure.com"]
private_dns_zone_ids = [data.azurerm_private_dns_zone.mysql.id]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_secret" "db_password" {
name = "${azurerm_mysql_flexible_server.gitea.name}-administrator-password"
value = random_password.password.result
key_vault_id = data.azurerm_key_vault.keyvault.id
tags = local.tre_shared_service_tags
depends_on = [
azurerm_key_vault_access_policy.gitea_policy
]
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/shared_services/gitea/terraform/mysql.tf/0
|
{
"file_path": "AzureTRE/templates/shared_services/gitea/terraform/mysql.tf",
"repo_id": "AzureTRE",
"token_count": 1154
}
| 117 |
#!/bin/bash
export TF_LOG=""
terraform init -input=false -backend=true -reconfigure -upgrade \
-backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name:?}" \
-backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name:?}" \
-backend-config="container_name=${TF_VAR_terraform_state_container_name:?}" \
-backend-config="key=${TRE_ID:?}-shared-service-sonatype-nexus"
terraform destroy -auto-approve
|
AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/destroy.sh/0
|
{
"file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/terraform/destroy.sh",
"repo_id": "AzureTRE",
"token_count": 178
}
| 118 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
export TF_LOG=""
terraform init -input=false -backend=true -reconfigure -upgrade \
-backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name?}" \
-backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name?}" \
-backend-config="container_name=${TF_VAR_terraform_state_container_name?}" \
-backend-config="key=${TRE_ID?}${TF_VAR_workspace_id?}${TF_VAR_id?}azureml"
terraform plan
terraform apply -auto-approve
|
AzureTRE/templates/workspace_services/azureml/terraform/deploy.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/terraform/deploy.sh",
"repo_id": "AzureTRE",
"token_count": 213
}
| 119 |
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/azure/azapi" {
version = "1.1.0"
constraints = "1.1.0"
hashes = [
"h1:IR+AHCwfjl1c0baWwfOwZ6QZtHj41H2syTgHkJtAr/M=",
"zh:2a25df6325a49f9e821f0b02c7da86167fc19a3bac647cd1edf231300f29d077",
"zh:2b443a836a39724663fe455d4deee408ff3a2d9a8b86f8408aa7db2e8aa743f8",
"zh:364ed09ddfc50d9bed8d930f7de489cb654a9908feb139413a097823a50075fd",
"zh:523bc005f56ae785867d230d55c29f59db4b599dbc6c38b4d03ea55a79458916",
"zh:60ded375fdb305b60bcb4d9e596dbb222cab166bad1b4958199b05a72aaeacfd",
"zh:61e69c58642fead6814e511c872b7c0a6478ec6af4ab758b4512607d910ac078",
"zh:823b2154ae2262dabcbd11aac992e3cc29eae0f7baa96bee1e3e2fe1ece8730b",
"zh:870ea9cc24807ef5142e4cad0281dac7173f7b6bf818a79762b6c690d12d4c4b",
"zh:9094ae76ed66cb328a4f35bd18b9140fb6fc6859c2e46431ec73c018bcb58d96",
"zh:d89149cfd01cb70012459536b4d36490b58e43312440562e5910bd5160537858",
"zh:dba7ec06171ca062fc423ba5b4776a5600444e45e57f4d1cb043bdc3eee538b7",
"zh:ff5bd6883d9ac8334e043434246357a55107411e9a962856c1d17e47ee15ac37",
]
}
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.37.0"
constraints = "3.37.0"
hashes = [
"h1:83XTgyPKUKt706IjTLHo9HL0KN5m+DwmSKuVQv6dNb4=",
"zh:2a7bda0b7679d1c791c762103a22f333b544b6e6776c4177f33bafc9cc28c919",
"zh:49ff49670c349f918017315838a43ece09bf6f1bf7721b992f1cadbceb273c62",
"zh:55c9346d03380585e17616b79c4233b726d6fb9efa1921848834fc881e5d7d54",
"zh:5ab117b56a4236ea29926e9d95c27d7bf8ae6706d0fffb76c0b1bfe67bf3a78e",
"zh:5cfc086d5d56308edb3e68aac5f8a448ddc6e56541be7b152ae886399e9b2c69",
"zh:7a8929ed38152aac6652711f32193c8582bc996f8fa73879a3ac7a9bf88d2460",
"zh:895294e90a37f719975fcd2269b95e973147e48ec0ebb9c2fe472bc93531b49c",
"zh:8baa5e2b6e5b02df5b45d253a3aea93f22619920cf9577290d682b59a6d5664b",
"zh:b146a732c7909238c10d216b92a35092be4f72a0509a4c6742cc3245bf3b3bf3",
"zh:cedef898ccd512a6519eae3dff7eb0d581d2c3dad8e0001992da16ad1d7fded8",
"zh:f016d9ba94ea88476883b4d63cff88a0225974e0a8b8c3e8555f73c5de6f7119",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
|
AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/.terraform.lock.hcl/0
|
{
"file_path": "AzureTRE/templates/workspace_services/azureml/user_resources/aml_compute/terraform/.terraform.lock.hcl",
"repo_id": "AzureTRE",
"token_count": 1342
}
| 120 |
{
"australiacentral": {
"webappDestinationAddresses": ["13.75.218.172/32"],
"sccRelayDestinationAddresses": ["13.75.164.249/32"],
"sccRelayDomains": ["tunnel.australiaeast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.70.105.50/32"],
"extendedInfrastructureDestinationAddresses": ["20.53.145.128/28"],
" ": ["dblogprodausteast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodaustc.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodaustc2.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-australiaeast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-australiacentral-prod-metastore.mysql.database.azure.com"
]
},
"australiacentral2": {
"webappDestinationAddresses": ["13.75.218.172/32"],
"sccRelayDestinationAddresses": ["13.75.164.249/32"],
"sccRelayDomains": ["tunnel.australiaeast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.70.105.50/32"],
"extendedInfrastructureDestinationAddresses": ["20.53.145.128/28"],
"logBlobStorageDomains": ["dblogprodausteast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodaustc2.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodaustc.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-australiaeast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-australiacentral2-prod-metastore.mysql.database.azure.com"
]
},
"australiaeast": {
"webappDestinationAddresses": ["13.75.218.172/32"],
"sccRelayDestinationAddresses": ["13.75.164.249/32"],
"sccRelayDomains": ["tunnel.australiaeast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.70.105.50/32"],
"extendedInfrastructureDestinationAddresses": ["20.53.145.128/28"],
"logBlobStorageDomains": ["dblogprodausteast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodauste.blob.core.windows.net",
"arprodaustea1.blob.core.windows.net",
"arprodaustea2.blob.core.windows.net",
"arprodaustea3.blob.core.windows.net",
"arprodaustea4.blob.core.windows.net",
"arprodaustea5.blob.core.windows.net",
"arprodaustea6.blob.core.windows.net",
"arprodaustea7.blob.core.windows.net",
"arprodaustea8.blob.core.windows.net",
"arprodaustea9.blob.core.windows.net",
"arprodaustea10.blob.core.windows.net",
"arprodaustea11.blob.core.windows.net",
"arprodaustea12.blob.core.windows.net",
"arprodaustea13.blob.core.windows.net",
"arprodaustea14.blob.core.windows.net",
"arprodaustea15.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodaustse.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-australiaeast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-australiaeast-prod-metastore.mysql.database.azure.com",
"consolidated-australiaeast-prod-metastore-addl-1.mysql.database.azure.com"
]
},
"australiasoutheast": {
"webappDestinationAddresses": ["13.75.218.172/32"],
"sccRelayDestinationAddresses": ["13.75.164.249/32"],
"sccRelayDomains": ["tunnel.australiaeast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.70.105.50/32"],
"extendedInfrastructureDestinationAddresses": ["20.53.145.128/28"],
"logBlobStorageDomains": ["dblogprodausteast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodaustse.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodauste.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-australiaeast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-australiasoutheast-prod-metastore.mysql.database.azure.com"
]
},
"brazilsouth": {
"webappDestinationAddresses": ["104.41.54.118/32"],
"sccRelayDestinationAddresses": ["23.97.106.142/32"],
"sccRelayDomains": ["tunnel.brazilsouth.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["191.232.53.223/32"],
"extendedInfrastructureDestinationAddresses": ["20.197.222.144/28"],
"logBlobStorageDomains": ["dblogprodbrazilsou.blob.core.windows.net."],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodbrazilsou.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodsafrican.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-brazilsouth-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-brazilsouth-prod-metastore.mysql.database.azure.com"
]
},
"canadacentral": {
"webappDestinationAddresses": ["13.71.184.74/32"],
"sccRelayDestinationAddresses": ["13.88.249.244/32"],
"sccRelayDomains": ["tunnel.canadacentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.85.223.25/32"],
"extendedInfrastructureDestinationAddresses": ["52.139.4.160/28"],
"logBlobStorageDomains": ["dblogprodcacentral.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcacentral.blob.core.windows.net",
"arprodcacentrala1.blob.core.windows.net",
"arprodcacentrala2.blob.core.windows.net",
"arprodcacentrala3.blob.core.windows.net",
"arprodcacentrala4.blob.core.windows.net",
"arprodcacentrala5.blob.core.windows.net",
"arprodcacentrala6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcaeast.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-canadacentral-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-canadacentral-prod-metastore.mysql.database.azure.com"
]
},
"canadaeast": {
"webappDestinationAddresses": ["13.71.184.74/32"],
"sccRelayDestinationAddresses": ["13.88.249.244/32"],
"sccRelayDomains": ["tunnel.canadacentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.85.223.25/32"],
"extendedInfrastructureDestinationAddresses": ["52.139.4.160/28"],
"logBlobStorageDomains": ["dblogprodcacentral.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcaeast.blob.core.windows.net",
"arprodcaeasta1.blob.core.windows.net",
"arprodcaeasta2.blob.core.windows.net",
"arprodcaeasta3.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcacentral.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-canadacentral-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-canadaeast-prod-metastore.mysql.database.azure.com"
]
},
"centralindia": {
"webappDestinationAddresses": ["104.211.89.81/32"],
"sccRelayDestinationAddresses": ["52.172.133.58/32"],
"sccRelayDomains": ["tunnel.centralindia.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["104.211.101.14/32"],
"extendedInfrastructureDestinationAddresses": ["20.193.246.208/28"],
"logBlobStorageDomains": ["dblogprodcindia.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcindia.blob.core.windows.net",
"arprodcindiaa1.blob.core.windows.net",
"arprodcindiaa2.blob.core.windows.net",
"arprodcindiaa3.blob.core.windows.net",
"arprodcindiaa4.blob.core.windows.net",
"arprodcindiaa5.blob.core.windows.net",
"arprodcindiaa6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwindia.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-centralindia-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-centralindia-prod-metastore.mysql.database.azure.com"
]
},
"centralus": {
"webappDestinationAddresses": ["40.70.58.221/32", "20.37.156.209/32"],
"sccRelayDestinationAddresses": ["13.86.58.215/32", "52.247.0.200/32"],
"sccRelayDomains": [
"tunnel.eastus2.azuredatabricks.net",
"tunnel.centralusc2.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.101.152.95/32",
"20.37.156.208/32"
],
"extendedInfrastructureDestinationAddresses": ["20.57.106.0/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodcentralus.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcus.blob.core.windows.net",
"arprodcusa1.blob.core.windows.net",
"arprodcusa2.blob.core.windows.net",
"arprodcusa3.blob.core.windows.net",
"arprodcusa4.blob.core.windows.net",
"arprodcusa5.blob.core.windows.net",
"arprodcusa6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodscus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-centralusc2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-centralus-prod-metastore.mysql.database.azure.com",
"consolidated-centralus-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-centralus-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-centralus-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-centralusc2-prod-metastore-0.mysql.database.azure.com",
"consolidated-centralusc2-prod-metastore-1.mysql.database.azure.com",
"consolidated-centralusc2-prod-metastore-2.mysql.database.azure.com",
"consolidated-centralusc2-prod-metastore-3.mysql.database.azure.com"
]
},
"chinaeast2": {
"webappDestinationAddresses": ["52.130.1.64/32"],
"sccRelayDestinationAddresses": ["52.130.1.65/32"],
"sccRelayDomains": ["tunnel.chinaeast2.databricks.azure.cn"],
"controlPlaneNatDestinationAddresses": ["52.130.1.65/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": ["dblogprodchinaeast2.blob.core.chinacloudapi.cn"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcne2.blob.core.chinacloudapi.cn"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcnn2.blob.core.chinacloudapi.cn"
],
"eventHubEndpointDomains": [
"prod-chinaeast2-observabilityeventhubs.servicebus.chinacloudapi.cn"
],
"metastoreDomains": [
"consolidated-chinaeast2-prod-metastore-0.mysql.database.chinacloudapi.cn"
]
},
"chinaeast3": {
"webappDestinationAddresses": ["52.130.1.64/32"],
"sccRelayDestinationAddresses": ["52.130.1.65/32"],
"sccRelayDomains": ["tunnel.chinaeast2.databricks.azure.cn"],
"controlPlaneNatDestinationAddresses": ["52.130.1.65/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": ["dblogprodchinaeast3.blob.core.chinacloudapi.cn"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcne3.blob.core.chinacloudapi.cn"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcne3.blob.core.chinacloudapi.cn"
],
"eventHubEndpointDomains": [
"prod-chinaeast2-observabilityeventhubs.servicebus.chinacloudapi.cn"
],
"metastoreDomains": [
"consolidated-chinaeast3-prod-metastore-0.mysql.database.chinacloudapi.cn"
]
},
"chinanorth2": {
"webappDestinationAddresses": ["52.130.16.113/32"],
"sccRelayDestinationAddresses": ["52.130.16.112/32"],
"sccRelayDomains": ["tunnel.chinanorth2.databricks.azure.cn"],
"controlPlaneNatDestinationAddresses": ["52.130.16.112/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": [
"dblogprodchinanorth2.blob.core.chinacloudapi.cn"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcnn2.blob.core.chinacloudapi.cn"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcnn2.blob.core.chinacloudapi.cn "
],
"eventHubEndpointDomains": [
"prod-chinanorth2-observabilityeventhubs.servicebus.chinacloudapi.cn"
],
"metastoreDomains": [
"consolidated-chinanorth2-prod-metastore-0.mysql.database.chinacloudapi.cn"
]
},
"chinanorth3": {
"webappDestinationAddresses": ["52.130.16.113/32"],
"sccRelayDestinationAddresses": ["52.130.16.112/32"],
"sccRelayDomains": ["tunnel.chinanorth2.databricks.azure.cn"],
"controlPlaneNatDestinationAddresses": ["52.130.16.112/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": [
"dblogprodchinanorth3.blob.core.chinacloudapi.cn"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodcnn3.blob.core.chinacloudapi.cn"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcnn3.blob.core.chinacloudapi.cn"
],
"eventHubEndpointDomains": [
"prod-chinanorth2-observabilityeventhubs.servicebus.chinacloudapi.cn"
],
"metastoreDomains": [
"consolidated-chinanorth3-prod-metastore-0.mysql.database.chinacloudapi.cn"
]
},
"eastasia": {
"webappDestinationAddresses": ["52.187.145.107/32"],
"sccRelayDestinationAddresses": ["102.37.41.3/32"],
"sccRelayDomains": ["tunnel.southeastasia.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["52.187.0.85/32"],
"extendedInfrastructureDestinationAddresses": ["20.195.104.64/28"],
"logBlobStorageDomains": [
"dblogprodseasia.blob.core.windows.net",
"dblogprodeastasia.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodeap.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodseap.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-eastasiac2-observabilityeventhubs.servicebus.windows.net",
"prod-southeastasia-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-eastasia-prod-metastore.mysql.database.azure.com",
"consolidated-eastasiac2-prod-metastore-0.mysql.database.azure.com"
]
},
"eastus": {
"webappDestinationAddresses": [
"40.70.58.221/32",
"20.42.4.209/32",
"20.42.4.211/32"
],
"sccRelayDestinationAddresses": ["52.247.0.200/32", "52.146.50.16/32"],
"sccRelayDomains": [
"tunnel.eastus2.azuredatabricks.net",
"tunnel.eastusc3.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.101.152.95/32",
"20.42.4.208/32",
"20.42.4.210/32"
],
"extendedInfrastructureDestinationAddresses": ["20.57.106.0/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodeastus.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodeastus.blob.core.windows.net",
"arprodeastusa1.blob.core.windows.net",
"arprodeastusa2.blob.core.windows.net",
"arprodeastusa3.blob.core.windows.net",
"arprodeastusa4.blob.core.windows.net",
"arprodeastusa5.blob.core.windows.net",
"arprodeastusa6.blob.core.windows.net",
"arprodeastusa7.blob.core.windows.net",
"arprodeastusa8.blob.core.windows.net",
"arprodeastusa9.blob.core.windows.net",
"arprodeastusa10.blob.core.windows.net",
"arprodeastusa11.blob.core.windows.net",
"arprodeastusa12.blob.core.windows.net",
"arprodeastusa13.blob.core.windows.net",
"arprodeastusa14.blob.core.windows.net",
"arprodeastusa15.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodeastus2.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-eastusc2-observabilityeventhubs.servicebus.windows.net",
"prod-eastusc3-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-eastus-prod-metastore.mysql.database.azure.com",
"consolidated-eastus-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-eastus-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-eastus-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-eastus-prod-metastore-addl-4.mysql.database.azure.com",
"consolidated-eastusc2-prod-metastore-0.mysql.database.azure.com",
"consolidated-eastusc3-prod-metastore-0.mysql.database.azure.com",
"consolidated-eastusc3-prod-metastore-1.mysql.database.azure.com",
"consolidated-eastusc3-prod-metastore-2.mysql.database.azure.com",
"consolidated-eastusc3-prod-metastore-3.mysql.database.azure.com"
]
},
"eastus2": {
"webappDestinationAddresses": ["40.70.58.221/32", "20.41.4.113/32"],
"sccRelayDestinationAddresses": ["52.247.0.200/32", "20.186.83.56/32"],
"sccRelayDomains": [
"tunnel.eastus2.azuredatabricks.net",
"tunnel.eastus2c2.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.101.152.95/32",
"20.41.4.112/32"
],
"extendedInfrastructureDestinationAddresses": ["20.57.106.0/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodeastus2.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodeastus2.blob.core.windows.net",
"arprodeastus2a1.blob.core.windows.net",
"arprodeastus2a2.blob.core.windows.net",
"arprodeastus2a3.blob.core.windows.net",
"arprodeastus2a4.blob.core.windows.net",
"arprodeastus2a5.blob.core.windows.net",
"arprodeastus2a6.blob.core.windows.net",
"arprodeastus2a7.blob.core.windows.net",
"arprodeastus2a8.blob.core.windows.net",
"arprodeastus2a9.blob.core.windows.net",
"arprodeastus2a10.blob.core.windows.net",
"arprodeastus2a11.blob.core.windows.net",
"arprodeastus2a12.blob.core.windows.net",
"arprodeastus2a13.blob.core.windows.net",
"arprodeastus2a14.blob.core.windows.net",
"arprodeastus2a15.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodeastus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-eastus2c2-observabilityeventhubs.servicebus.windows.net",
"prod-eastus2c3-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-eastus2-prod-metastore.mysql.database.azure.com",
"consolidated-eastus2-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-eastus2-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-eastus2-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-eastus2c2-prod-metastore-0.mysql.database.azure.com",
"consolidated-eastus2c2-prod-metastore-1.mysql.database.azure.com",
"consolidated-eastus2c2-prod-metastore-2.mysql.database.azure.com",
"consolidated-eastus2c2-prod-metastore-3.mysql.database.azure.com",
"consolidated-eastus2c3-prod-metastore-0.mysql.database.azure.com"
]
},
"eastus2euap": {
"webappDestinationAddresses": [""],
"sccRelayDestinationAddresses": [""],
"sccRelayDomains": [""],
"controlPlaneNatDestinationAddresses": [""],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": [""],
"artifactBlobStoragePrimaryDomains": [""],
"artifactBlobStorageSecondaryDomains": [""],
"eventHubEndpointDomains": [""],
"metastoreDomains": [""]
},
"francecentral": {
"webappDestinationAddresses": ["40.89.168.225/32"],
"sccRelayDestinationAddresses": ["51.103.18.111/32"],
"sccRelayDomains": ["tunnel.francecentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.89.171.101/32"],
"extendedInfrastructureDestinationAddresses": ["20.74.69.128/28"],
"logBlobStorageDomains": ["dblogprodfrcentral.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodfrcentral.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodukwest.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-francecentral-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-francecentral-prod-metastore.mysql.database.azure.com"
]
},
"germanywestcentral": {
"webappDestinationAddresses": ["20.52.93.41/32"],
"sccRelayDestinationAddresses": ["20.52.93.40/32"],
"sccRelayDomains": ["tunnel.germanywestcentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["20.52.93.42/32"],
"extendedInfrastructureDestinationAddresses": ["20.52.93.40/29"],
"logBlobStorageDomains": ["dblogprodgerwescen.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodgerwescen.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodswissnor.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-germanywestcentral-observabilityEventHubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-germanywestcentral-prod-metastore-0.mysql.database.azure.com"
]
},
"japaneast": {
"webappDestinationAddresses": ["52.246.160.72/32"],
"sccRelayDestinationAddresses": ["20.46.121.76/32"],
"sccRelayDomains": ["tunnel.japaneast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.78.19.235/32"],
"extendedInfrastructureDestinationAddresses": ["20.78.226.176/28"],
"logBlobStorageDomains": ["dblogprodjapaneast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodjapaneast.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodjapanwest.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-japaneast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-japaneast-prod-metastore.mysql.database.azure.com"
]
},
"japanwest": {
"webappDestinationAddresses": ["52.246.160.72/32"],
"sccRelayDestinationAddresses": ["20.46.121.76/32"],
"sccRelayDomains": ["tunnel.japaneast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["13.78.19.235/32"],
"extendedInfrastructureDestinationAddresses": ["20.78.226.176/28"],
"logBlobStorageDomains": ["dblogprodjapaneast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodjapanwest.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodjapaneast.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-japaneast-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-japanwest-prod-metastore.mysql.database.azure.com"
]
},
"koreacentral": {
"webappDestinationAddresses": ["52.141.22.164/32"],
"sccRelayDestinationAddresses": ["20.194.4.102/32"],
"sccRelayDomains": ["tunnel.koreacentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["52.141.6.181/32"],
"extendedInfrastructureDestinationAddresses": ["20.194.107.48/28"],
"logBlobStorageDomains": ["dblogprodkoreacentral.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodkoreacen.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-koreacentral-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-koreacentral-prod-metastore.mysql.database.azure.com"
]
},
"northcentralus": {
"webappDestinationAddresses": ["40.70.58.221/32", "40.80.188.0/32"],
"sccRelayDestinationAddresses": ["52.247.0.200/32", "23.100.226.13/32"],
"sccRelayDomains": [
"tunnel.eastus2.azuredatabricks.net",
"tunnel.northcentralusc2.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.101.152.95/32",
"40.80.188.1/32"
],
"extendedInfrastructureDestinationAddresses": ["20.57.106.0/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodncentralus.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodncus.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-northcentralusc2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-northcentralus-prod-metastore.mysql.database.azure.com",
"consolidated-northcentralusc2-prod-metastore-0.mysql.database.azure.com"
]
},
"northeurope": {
"webappDestinationAddresses": ["52.232.19.246/32", "20.38.84.81/32"],
"sccRelayDestinationAddresses": ["23.97.201.41/32", "40.127.147.196/32"],
"sccRelayDomains": [
"tunnel.westeurope.azuredatabricks.net",
"tunnel.northeuropec2.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.100.0.135/32",
"20.38.84.80/32"
],
"extendedInfrastructureDestinationAddresses": ["20.73.215.48/28"],
"logBlobStorageDomains": ["dblogprodwesteurope.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodnortheu.blob.core.windows.net",
"arprodnortheua1.blob.core.windows.net",
"arprodnortheua2.blob.core.windows.net",
"arprodnortheua3.blob.core.windows.net",
"arprodnortheua4.blob.core.windows.net",
"arprodnortheua5.blob.core.windows.net",
"arprodnortheua6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwesteu.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westeurope-observabilityeventhubs.servicebus.windows.net",
"prod-northeuropec2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-northeurope-prod-metastore.mysql.database.azure.com",
"consolidated-northeurope-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-northeurope-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-northeurope-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-northeuropec2-prod-metastore-0.mysql.database.azure.com",
"consolidated-northeuropec2-prod-metastore-1.mysql.database.azure.com",
"consolidated-northeuropec2-prod-metastore-2.mysql.database.azure.com",
"consolidated-northeuropec2-prod-metastore-3.mysql.database.azure.com"
]
},
"norwayeast": {
"webappDestinationAddresses": ["51.120.40.120/32"],
"sccRelayDestinationAddresses": ["51.120.40.122/32"],
"sccRelayDomains": ["tunnel.norwayeast.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.120.40.121/32"],
"extendedInfrastructureDestinationAddresses": ["51.13.86.224/28"],
"logBlobStorageDomains": ["dblogprodnweast.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodnweast.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-norwayeast-observabilityEventHubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-norwayeast-prod-metastore-0.mysql.database.azure.com"
]
},
"southafricanorth": {
"webappDestinationAddresses": ["102.133.224.24/32"],
"sccRelayDestinationAddresses": ["102.37.41.3/32"],
"sccRelayDomains": ["tunnel.southafricanorth.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.127.5.82/32"],
"extendedInfrastructureDestinationAddresses": ["102.133.192.48/28"],
"logBlobStorageDomains": ["dblogprodsafrican.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodsafrican.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-southafricanorth-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-southafricanorth-prod-metastore.mysql.database.azure.com"
]
},
"southcentralus": {
"webappDestinationAddresses": ["40.118.174.12/32", "40.119.9.208/32"],
"sccRelayDestinationAddresses": ["40.86.167.110/32"],
"sccRelayDomains": ["tunnel.westus.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": [
"40.83.178.242/32",
"40.119.9.209/32"
],
"extendedInfrastructureDestinationAddresses": ["13.91.84.96/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodscentralus.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodscus.blob.core.windows.net",
"arprodscusa1.blob.core.windows.net",
"arprodscusa2.blob.core.windows.net",
"arprodscusa3.blob.core.windows.net",
"arprodscusa4.blob.core.windows.net",
"arprodscusa5.blob.core.windows.net",
"arprodscusa6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodncus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-southcentralusc2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-southcentralus-prod-metastore.mysql.database.azure.com",
"consolidated-southcentralus-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-southcentralusc2-prod-metastore-addl-1.mysql.database.azure.com"
]
},
"southindia": {
"webappDestinationAddresses": ["104.211.89.81/32"],
"sccRelayDestinationAddresses": ["52.172.133.58/32"],
"sccRelayDomains": ["tunnel.centralindia.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["104.211.101.14/32"],
"extendedInfrastructureDestinationAddresses": ["20.193.246.208/28"],
"logBlobStorageDomains": ["dblogprodcindia.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodsindia.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodcindia.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-centralindia-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-southindia-prod-metastore.mysql.database.azure.com"
]
},
"southeastasia": {
"webappDestinationAddresses": ["52.187.145.107/32"],
"sccRelayDestinationAddresses": ["52.230.27.216/32"],
"sccRelayDomains": ["tunnel.southeastasia.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["52.187.0.85/32"],
"extendedInfrastructureDestinationAddresses": ["20.195.104.64/28"],
"logBlobStorageDomains": ["dblogprodseasia.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodseap.blob.core.windows.net",
"arprodseapa1.blob.core.windows.net",
"arprodseapa2.blob.core.windows.net",
"arprodseapa3.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodeap.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-southeastasia-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-southeastasia-prod-metastore.mysql.database.azure.com",
"consolidated-southeastasia-prod-metastore-addl-1.mysql.database.azure.com"
]
},
"swedencentral": {
"webappDestinationAddresses": ["51.12.41.16/32"],
"sccRelayDestinationAddresses": ["51.12.41.18/32"],
"sccRelayDomains": ["tunnel.swedencentral.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.12.41.17/32"],
"extendedInfrastructureDestinationAddresses": ["20.91.164.16/28"],
"logBlobStorageDomains": ["dblogprodswissnor.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodswissnor.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwesteu.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-switzerlandnorth-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-switzerlandnorth-prod-metastore-0.mysql.database.azure.com"
]
},
"switzerlandnorth": {
"webappDestinationAddresses": ["51.107.48.120/32"],
"sccRelayDestinationAddresses": ["51.107.203.195/32"],
"sccRelayDomains": ["tunnel.switzerlandnorth.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.107.48.121/32"],
"extendedInfrastructureDestinationAddresses": ["51.103.172.176/28"],
"logBlobStorageDomains": [""],
"artifactBlobStoragePrimaryDomains": [""],
"artifactBlobStorageSecondaryDomains": [""],
"eventHubEndpointDomains": [""],
"metastoreDomains": [""]
},
"switzerlandwest": {
"webappDestinationAddresses": ["51.107.144.68/32"],
"sccRelayDestinationAddresses": ["51.107.144.70/32"],
"sccRelayDomains": ["tunnel.switzerlandwest.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.107.144.69/32"],
"extendedInfrastructureDestinationAddresses": ["51.107.233.80/28"],
"logBlobStorageDomains": ["dblogprodswisswest.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodswisswest.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodswissnor.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-switzerlandwest-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-switzerlandwest-prod-metastore-0.mysql.database.azure.com",
"consolidated-switzerlandwest-prod-metastore-0.mysql.database.azure.com"
]
},
"uaenorth": {
"webappDestinationAddresses": ["40.123.212.253/32"],
"sccRelayDestinationAddresses": ["40.123.225.135/32"],
"sccRelayDomains": ["tunnel.uaenorth.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.123.218.63/32"],
"extendedInfrastructureDestinationAddresses": ["40.120.89.0/28"],
"logBlobStorageDomains": ["dblogproduaenorth.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsproduaenorth.blob.core.windows.net",
"arproduaenortha1.blob.core.windows.net",
"arproduaenortha2.blob.core.windows.net",
"arproduaenortha3.blob.core.windows.net",
"arproduaenortha4.blob.core.windows.net",
"arproduaenortha5.blob.core.windows.net",
"arproduaenortha6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodsafrican.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-uaenorth-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-uaenorth-prod-metastore.mysql.database.azure.com"
]
},
"uksouth": {
"webappDestinationAddresses": ["51.140.204.4/32"],
"sccRelayDestinationAddresses": ["51.141.103.193/32"],
"sccRelayDomains": ["tunnel.ukwest.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.140.203.27/32"],
"extendedInfrastructureDestinationAddresses": ["51.141.64.128/28"],
"logBlobStorageDomains": ["dblogprodukwest.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsproduksouth.blob.core.windows.net",
"arproduksoutha1.blob.core.windows.net",
"arproduksoutha2.blob.core.windows.net",
"arproduksoutha3.blob.core.windows.net",
"arproduksoutha4.blob.core.windows.net",
"arproduksoutha5.blob.core.windows.net",
"arproduksoutha6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodukwest.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-ukwest-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-uksouth-prod-metastore.mysql.database.azure.com",
"consolidated-uksouth-prod-metastore-addl-1.mysql.database.azure.com"
]
},
"ukwest": {
"webappDestinationAddresses": ["51.140.204.4/32"],
"sccRelayDestinationAddresses": ["51.141.103.193/32"],
"sccRelayDomains": ["tunnel.ukwest.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["51.140.203.27/32"],
"extendedInfrastructureDestinationAddresses": ["51.141.64.128/28"],
"logBlobStorageDomains": ["dblogprodukwest.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodukwest.blob.core.windows.net",
"arprodukwesta1.blob.core.windows.net",
"arprodukwesta2.blob.core.windows.net",
"arprodukwesta3.blob.core.windows.net",
"arprodukwesta4.blob.core.windows.net",
"arprodukwesta5.blob.core.windows.net",
"arprodukwesta6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsproduksouth.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-ukwest-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-ukwest-prod-metastore.mysql.database.azure.com"
]
},
"usgovarizona": {
"webappDestinationAddresses": ["52.244.37.5/32"],
"sccRelayDestinationAddresses": ["52.244.38.231/32"],
"sccRelayDomains": ["tunnel.usgovarizona.databricks.azure.us"],
"controlPlaneNatDestinationAddresses": ["20.140.48.120/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": ["dblogprodusgovvirg.blob.core.usgovcloudapi.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodusgariz.blob.core.usgovcloudapi.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodusgvirg.blob.core.usgovcloudapi.net"
],
"eventHubEndpointDomains": [
"prod-usgovarizona-observabilityeventhubs.servicebus.usgovcloudapi.net"
],
"metastoreDomains": [
"consolidated-usgovarizona-prod-metastore-0.mysql.database.usgovcloudapi.net"
]
},
"usgovvirginia": {
"webappDestinationAddresses": ["52.227.227.164/32"],
"sccRelayDestinationAddresses": ["52.227.226.255/32"],
"sccRelayDomains": ["tunnel.usgovvirginia.databricks.azure.us"],
"controlPlaneNatDestinationAddresses": ["52.127.49.73/32"],
"extendedInfrastructureDestinationAddresses": [],
"logBlobStorageDomains": ["dblogprodusgovvirg.blob.core.usgovcloudapi.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodusgvirg.blob.core.usgovcloudapi.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodusgariz.blob.core.usgovcloudapi.net"
],
"eventHubEndpointDomains": [
"prod-usgovvirginia-observabilityeventhubs.servicebus.usgovcloudapi.net"
],
"metastoreDomains": [
"consolidated-usgovvirginia-prod-metastore.mysql.database.usgovcloudapi.net"
]
},
"westcentralus": {
"webappDestinationAddresses": ["52.150.136.68/32"],
"sccRelayDestinationAddresses": ["52.150.136.70/32"],
"sccRelayDomains": ["tunnel.westcentralus.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["52.150.136.69/32"],
"extendedInfrastructureDestinationAddresses": ["52.161.34.0/28"],
"logBlobStorageDomains": ["dblogprodwcentus.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwcentus.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westcentralus-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westcentralus-prod-metastore-0.mysql.database.azure.com"
]
},
"westeurope": {
"webappDestinationAddresses": ["52.232.19.246/32", "40.74.30.80/32"],
"sccRelayDestinationAddresses": ["23.97.201.41/32", "51.138.96.158/32"],
"sccRelayDomains": [
"tunnel.westeurope.azuredatabricks.net",
"tunnel.westeuropec2.azuredatabricks.net"
],
"controlPlaneNatDestinationAddresses": [
"23.100.0.135/32",
"40.74.30.81/32"
],
"extendedInfrastructureDestinationAddresses": ["20.73.215.48/28"],
"logBlobStorageDomains": ["dblogprodwesteurope.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwesteu.blob.core.windows.net",
"arprodwesteua1.blob.core.windows.net",
"arprodwesteua2.blob.core.windows.net",
"arprodwesteua3.blob.core.windows.net",
"arprodwesteua4.blob.core.windows.net",
"arprodwesteua5.blob.core.windows.net",
"arprodwesteua6.blob.core.windows.net",
"arprodwesteua7.blob.core.windows.net",
"arprodwesteua8.blob.core.windows.net",
"arprodwesteua9.blob.core.windows.net",
"arprodwesteua10.blob.core.windows.net",
"arprodwesteua11.blob.core.windows.net",
"arprodwesteua12.blob.core.windows.net",
"arprodwesteua13.blob.core.windows.net",
"arprodwesteua14.blob.core.windows.net",
"arprodwesteua15.blob.core.windows.net",
"arprodwesteua16.blob.core.windows.net",
"arprodwesteua17.blob.core.windows.net",
"arprodwesteua18.blob.core.windows.net",
"arprodwesteua19.blob.core.windows.net",
"arprodwesteua20.blob.core.windows.net",
"arprodwesteua21.blob.core.windows.net",
"arprodwesteua22.blob.core.windows.net",
"arprodwesteua23.blob.core.windows.net",
"arprodwesteua24.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodnortheu.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westeurope-observabilityeventhubs.servicebus.windows.net",
"prod-westeuc2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westeurope-prod-metastore.mysql.database.azure.com",
"consolidated-westeurope-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-westeurope-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-westeurope-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-westeuropec2-prod-metastore-0.mysql.database.azure.com",
"consolidated-westeuropec2-prod-metastore-1.mysql.database.azure.com",
"consolidated-westeuropec2-prod-metastore-2.mysql.database.azure.com",
"consolidated-westeuropec2-prod-metastore-3.mysql.database.azure.com"
]
},
"westindia": {
"webappDestinationAddresses": ["104.211.89.81/32"],
"sccRelayDestinationAddresses": ["52.172.133.58/32"],
"sccRelayDomains": ["tunnel.centralindia.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["104.211.101.14/32"],
"extendedInfrastructureDestinationAddresses": ["20.193.246.208/28"],
"logBlobStorageDomains": ["dblogprodcindia.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwindia.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodsindia.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-centralindia-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westindia-prod-metastore.mysql.database.azure.com"
]
},
"westus": {
"webappDestinationAddresses": ["40.118.174.12/32", "20.42.129.160/32"],
"sccRelayDestinationAddresses": ["40.86.167.110/32"],
"sccRelayDomains": ["tunnel.westus.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": [
"40.83.178.242/32",
"20.42.129.161/32"
],
"extendedInfrastructureDestinationAddresses": ["13.91.84.96/28"],
"logBlobStorageDomains": ["dblogprodwestus.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net",
"arprodwestusa1.blob.core.windows.net",
"arprodwestusa2.blob.core.windows.net",
"arprodwestusa3.blob.core.windows.net",
"arprodwestusa4.blob.core.windows.net",
"arprodwestusa5.blob.core.windows.net",
"arprodwestusa6.blob.core.windows.net",
"arprodwestusa7.blob.core.windows.net",
"arprodwestusa8.blob.core.windows.net",
"arprodwestusa9.blob.core.windows.net",
"arprodwestusa10.blob.core.windows.net",
"arprodwestusa11.blob.core.windows.net",
"arprodwestusa12.blob.core.windows.net",
"arprodwestusa13.blob.core.windows.net",
"arprodwestusa14.blob.core.windows.net",
"arprodwestusa15.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus2.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net",
"prod-westus2c2-observabilityeventhubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westus-prod-metastore.mysql.database.azure.com",
"consolidated-westus-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-westus-prod-metastore-addl-2.mysql.database.azure.com",
"consolidated-westus-prod-metastore-addl-3.mysql.database.azure.com",
"consolidated-westus2c2-prod-metastore-addl-1.mysql.database.azure.com"
]
},
"westus2": {
"webappDestinationAddresses": ["40.118.174.12/32"],
"sccRelayDestinationAddresses": ["40.86.167.110/32"],
"sccRelayDomains": ["tunnel.westus.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["40.83.178.242/32"],
"extendedInfrastructureDestinationAddresses": ["13.91.84.96/28"],
"logBlobStorageDomains": [
"dblogprodwestus.blob.core.windows.net",
"dblogprodwestus2.blob.core.windows.net"
],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwestus2.blob.core.windows.net",
"arprodwestus2a1.blob.core.windows.net",
"arprodwestus2a2.blob.core.windows.net",
"arprodwestus2a3.blob.core.windows.net",
"arprodwestus2a4.blob.core.windows.net",
"arprodwestus2a5.blob.core.windows.net",
"arprodwestus2a6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus-observabilityEventHubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westus2-prod-metastore.mysql.database.azure.com",
"consolidated-westus2-prod-metastore-addl-1.mysql.database.azure.com",
"consolidated-westus2-prod-metastore-addl-2.mysql.database.azure.com"
]
},
"westus3": {
"webappDestinationAddresses": ["20.150.160.106"],
"sccRelayDestinationAddresses": ["40.86.167.110/32"],
"sccRelayDomains": ["tunnel.westus3.azuredatabricks.net"],
"controlPlaneNatDestinationAddresses": ["20.150.160.104"],
"extendedInfrastructureDestinationAddresses": ["20.125.82.0/28"],
"logBlobStorageDomains": ["dblogprodwestus3.blob.core.windows.net"],
"artifactBlobStoragePrimaryDomains": [
"dbartifactsprodwestus3.blob.core.windows.net",
"arprodwestus3a1.blob.core.windows.net",
"arprodwestus3a2.blob.core.windows.net",
"arprodwestus3a3.blob.core.windows.net",
"arprodwestus3a4.blob.core.windows.net",
"arprodwestus3a5.blob.core.windows.net",
"arprodwestus3a6.blob.core.windows.net"
],
"artifactBlobStorageSecondaryDomains": [
"dbartifactsprodwestus3.blob.core.windows.net"
],
"eventHubEndpointDomains": [
"prod-westus3-observabilityEventHubs.servicebus.windows.net"
],
"metastoreDomains": [
"consolidated-westus3-prod-metastore-0.mysql.database.azure.com"
]
}
}
|
AzureTRE/templates/workspace_services/databricks/terraform/databricks-udr.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/databricks/terraform/databricks-udr.json",
"repo_id": "AzureTRE",
"token_count": 21485
}
| 121 |
---
schemaVersion: 1.0.0
name: tre-workspace-service-gitea
version: 1.0.1
description: "A Gitea workspace service"
dockerfile: Dockerfile.tmpl
registry: azuretre
custom:
runtime_image:
name: gitea-workspace-service
build:
version_file: version.txt
docker_file: docker/Dockerfile
docker_context: docker
credentials:
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: mgmt_acr_name
type: string
env: mgmt_acr_name
description: "The devops ACR name"
- name: mgmt_resource_group_name
type: string
description: "Resource group containing the devops ACR"
env: MGMT_RESOURCE_GROUP_NAME
# the following are added automatically by the resource processor
- name: id
type: string
description: "An Id for this installation"
env: id
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
- name: tfstate_container_name
env: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
type: string
default: "public"
- name: sql_sku
type: string
default: "B | 4GB 2vCores"
- name: aad_authority_url
type: string
default: "https://login.microsoftonline.com"
mixins:
- exec
- terraform:
clientVersion: 1.3.6
outputs:
- name: connection_uri
type: string
applyTo:
- install
- upgrade
- name: is_exposed_externally
type: boolean
applyTo:
- install
- upgrade
- name: authentication_callback_uri
type: string
applyTo:
- install
- upgrade
- name: workspace_address_space
type: string
applyTo:
- install
- upgrade
install:
- terraform:
description: "Deploy Gitea workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
id: ${ bundle.parameters.id }
mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
sql_sku: ${ bundle.parameters.sql_sku }
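      # Remote Terraform state: the key below embeds the installation id, so each service instance keeps isolated state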
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-workspace-service-gitea-${ bundle.parameters.id }
outputs:
- name: connection_uri
- name: is_exposed_externally
- name: authentication_callback_uri
- name: workspace_address_space
upgrade:
- terraform:
description: "Deploy Gitea workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
id: ${ bundle.parameters.id }
mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
sql_sku: ${ bundle.parameters.sql_sku }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-workspace-service-gitea-${ bundle.parameters.id }
outputs:
- name: connection_uri
- name: is_exposed_externally
- name: authentication_callback_uri
- name: workspace_address_space
uninstall:
- terraform:
description: "Tear down Gitead workspace service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
id: ${ bundle.parameters.id }
mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
aad_authority_url: ${ bundle.parameters.aad_authority_url }
arm_environment: ${ bundle.parameters.arm_environment }
sql_sku: ${ bundle.parameters.sql_sku }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-workspace-service-gitea-${ bundle.parameters.id }
|
AzureTRE/templates/workspace_services/gitea/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/gitea/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 2097
}
| 122 |
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.guacamole</groupId>
<artifactId>guacamole-auth-tre</artifactId>
<version>0.3.4</version>
<packaging>jar</packaging>
<name>guacamole-azure-tre</name>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.release>11</maven.compiler.release>
</properties>
<dependencies>
<!-- There was a dependency clash between jwks-rsa and azure-identity, so their versions must be kept compatible -->
<dependency>
<groupId>com.auth0</groupId>
<artifactId>jwks-rsa</artifactId>
<version>0.22.0</version>
</dependency>
<dependency>
<groupId>com.auth0</groupId>
<artifactId>java-jwt</artifactId>
<version>4.4.0</version>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
<version>7.0.0</version>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-multibindings</artifactId>
<version>4.2.3</version>
</dependency>
<dependency>
<groupId>org.apache.guacamole</groupId>
<artifactId>guacamole-ext</artifactId>
<version>1.5.3</version>
<scope>provided</scope>
</dependency>
<!-- MS Azure dependencies -->
<dependency>
<groupId>com.azure</groupId>
<artifactId>azure-security-keyvault-secrets</artifactId>
<version>4.7.2</version>
</dependency>
<dependency>
<groupId>com.azure</groupId>
<artifactId>azure-identity</artifactId>
<version>1.9.1</version>
</dependency>
<!-- Http and Json Components -->
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
<version>20231013</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.14</version>
</dependency>
<!-- Java servlet API -->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>2.5</version>
<scope>provided</scope>
</dependency>
<!--Testing -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-inline</artifactId>
<version>5.2.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.squareup.okhttp3</groupId>
<artifactId>mockwebserver</artifactId>
<version>4.11.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>2.0.9</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>2.0.9</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.stefanbirkner</groupId>
<artifactId>system-rules</artifactId>
<version>1.19.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.9.3</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit-pioneer</groupId>
<artifactId>junit-pioneer</artifactId>
<version>2.0.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-junit-jupiter</artifactId>
<version>5.3.1</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>3.6.0</version>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
<includeScope>compile</includeScope>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.1.2</version>
</plugin>
</plugins>
<pluginManagement>
<!-- lock down plugins versions to avoid using Maven defaults (may be moved to parent pom) -->
<plugins>
<!-- clean lifecycle, see https://maven.apache.org/ref/current/maven-core/lifecycles.html#clean_Lifecycle -->
<plugin>
<artifactId>maven-clean-plugin</artifactId>
<version>3.3.1</version>
</plugin>
<!-- default lifecycle, jar packaging: see https://maven.apache.org/ref/current/maven-core/default-bindings.html#Plugin_bindings_for_jar_packaging -->
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<version>3.3.1</version>
</plugin>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.11.0</version>
</plugin>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<version>3.3.0</version>
</plugin>
</plugins>
</pluginManagement>
</build>
</project>
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/pom.xml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/pom.xml",
"repo_id": "AzureTRE",
"token_count": 3800
}
| 123 |
package org.apache.guacamole.auth.azuretre;
import com.auth0.jwk.UrlJwkProvider;
import org.apache.guacamole.GuacamoleException;
import org.apache.guacamole.auth.azuretre.connection.ConnectionService;
import org.apache.guacamole.auth.azuretre.user.AzureTREAuthenticatedUser;
import org.apache.guacamole.auth.azuretre.user.TreUserContext;
import org.apache.guacamole.net.auth.AuthenticatedUser;
import org.apache.guacamole.net.auth.Connection;
import org.apache.guacamole.net.auth.Credentials;
import org.apache.guacamole.net.auth.credentials.CredentialsInfo;
import org.apache.guacamole.net.auth.credentials.GuacamoleInvalidCredentialsException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junitpioneer.jupiter.SetEnvironmentVariable;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import javax.servlet.http.HttpServletRequest;
import java.util.HashMap;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
public class AzureTREAuthenticationProviderTest {
public static final String OAUTH_2_PROXY_JWKS_ENDPOINT = "OAUTH2_PROXY_JWKS_ENDPOINT";
public static final String JWKS_MOCK_ENDPOINT_URL = "https://mockedjwks.com";
public static final String MOCKED_TOKEN = "dummy_token";
public static final String MOCKED_USERNAME = "[email protected]";
@Mock
AuthenticationProviderService authenticationProviderService;
@Mock
HttpServletRequest requestMock;
@InjectMocks
Credentials credentialsMock;
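    // Mockito's @InjectMocks builds this Credentials with requestMock injected, so
    // credentialsMock.getRequest() is assumed to return the mocked HttpServletRequest.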
AzureTREAuthenticationProvider azureTREAuthenticationProvider;
@Mock
AzureTREAuthenticatedUser authenticatedUser;
@BeforeEach
void setup() {
azureTREAuthenticationProvider = new AzureTREAuthenticationProvider(authenticationProviderService);
}
@Test
public void authenticateUserSucceed() {
when(credentialsMock.getRequest().getHeader("X-Forwarded-Access-Token")).thenReturn(MOCKED_TOKEN);
when(credentialsMock.getRequest().getHeader("X-Forwarded-Preferred-Username")).thenReturn(MOCKED_USERNAME);
assertNotNull(azureTREAuthenticationProvider.authenticateUser(credentialsMock));
}
@Test
public void authenticateUserFailsWhenNoAccessToken() {
when(credentialsMock.getRequest().getHeader("X-Forwarded-Access-Token")).thenReturn("");
when(credentialsMock.getRequest().getHeader("X-Forwarded-Preferred-Username")).thenReturn(MOCKED_USERNAME);
assertNull(azureTREAuthenticationProvider.authenticateUser(credentialsMock));
}
@Test
public void authenticateUserFailsWhenNoPrefUsername() {
when(credentialsMock.getRequest().getHeader("X-Forwarded-Access-Token")).thenReturn(MOCKED_TOKEN);
when(credentialsMock.getRequest().getHeader("X-Forwarded-Preferred-Username")).thenReturn("");
assertNull(azureTREAuthenticationProvider.authenticateUser(credentialsMock));
}
@Test
@SetEnvironmentVariable(key = OAUTH_2_PROXY_JWKS_ENDPOINT, value = JWKS_MOCK_ENDPOINT_URL)
public void getUserContextSucceed() throws GuacamoleException {
try (MockedStatic<ConnectionService> connectionServiceMockedStatic =
Mockito.mockStatic(ConnectionService.class)) {
connectionServiceMockedStatic.when(() -> ConnectionService.getConnections(authenticatedUser))
.thenReturn(new HashMap<String, Connection>());
when(authenticatedUser.getAccessToken()).thenReturn(MOCKED_TOKEN);
TreUserContext treUserContext =
(TreUserContext) azureTREAuthenticationProvider.getUserContext(authenticatedUser);
verify(authenticationProviderService).validateToken(anyString(), any(UrlJwkProvider.class));
assertNotNull(treUserContext);
}
}
@Test
public void getUserContextFailsWhenNotInstanceOfAuthUser() throws GuacamoleException {
AuthenticatedUser notTreUser = mock(AuthenticatedUser.class);
assertNull(azureTREAuthenticationProvider.getUserContext(notTreUser));
}
@Test
@SetEnvironmentVariable(key = OAUTH_2_PROXY_JWKS_ENDPOINT, value = JWKS_MOCK_ENDPOINT_URL)
public void getUserContextFailsWhenTokenValidation() throws GuacamoleException {
try (MockedStatic<ConnectionService> connectionServiceMockedStatic =
Mockito.mockStatic(ConnectionService.class)) {
connectionServiceMockedStatic.when(() -> ConnectionService.getConnections(authenticatedUser))
.thenReturn(new HashMap<String, Connection>());
when(authenticatedUser.getAccessToken()).thenReturn(MOCKED_TOKEN);
doThrow(new GuacamoleInvalidCredentialsException(
"Could not validate token",
CredentialsInfo.USERNAME_PASSWORD))
.when(authenticationProviderService).validateToken(anyString(), any(UrlJwkProvider.class));
assertNull(azureTREAuthenticationProvider.getUserContext(authenticatedUser));
}
}
}
|
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/AzureTREAuthenticationProviderTest.java/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/test/java/org/apache/guacamole/auth/azuretre/AzureTREAuthenticationProviderTest.java",
"repo_id": "AzureTRE",
"token_count": 2052
}
| 124 |
data "azurerm_service_plan" "workspace" {
name = "plan-${var.workspace_id}"
resource_group_name = data.azurerm_resource_group.ws.name
}
# A user-assigned identity breaks a dependency cycle: the app identity must exist before the Key Vault access policy, which must exist before the Key Vault-referenced secrets in app settings
resource "azurerm_user_assigned_identity" "guacamole_id" {
resource_group_name = data.azurerm_resource_group.ws.name
location = data.azurerm_resource_group.ws.location
name = local.identity_name
tags = local.workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_linux_web_app" "guacamole" {
name = local.webapp_name
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
service_plan_id = data.azurerm_service_plan.workspace.id
https_only = true
key_vault_reference_identity_id = azurerm_user_assigned_identity.guacamole_id.id
virtual_network_subnet_id = data.azurerm_subnet.web_apps.id
tags = local.workspace_service_tags
site_config {
http2_enabled = true
container_registry_use_managed_identity = true
container_registry_managed_identity_client_id = azurerm_user_assigned_identity.guacamole_id.client_id
ftps_state = "Disabled"
vnet_route_all_enabled = true
minimum_tls_version = "1.2"
application_stack {
docker_image = "${data.azurerm_container_registry.mgmt_acr.login_server}/microsoft/azuretre/${var.image_name}"
docker_image_tag = local.image_tag
}
}
app_settings = {
WEBSITES_PORT = "8085"
TENANT_ID = data.azurerm_client_config.current.tenant_id
KEYVAULT_URL = data.azurerm_key_vault.ws.vault_uri
API_URL = local.api_url
SERVICE_ID = var.tre_resource_id
WORKSPACE_ID = var.workspace_id
MANAGED_IDENTITY_CLIENT_ID = azurerm_user_assigned_identity.guacamole_id.client_id
APPLICATIONINSIGHTS_CONNECTION_STRING = data.azurerm_application_insights.ws.connection_string
APPLICATIONINSIGHTS_INSTRUMENTATION_LOGGING_LEVEL = "INFO"
# Guacamole configuration
GUAC_DISABLE_COPY = var.guac_disable_copy
GUAC_DISABLE_PASTE = var.guac_disable_paste
GUAC_ENABLE_DRIVE = var.guac_enable_drive
GUAC_DRIVE_NAME = var.guac_drive_name
GUAC_DRIVE_PATH = var.guac_drive_path
GUAC_DISABLE_DOWNLOAD = var.guac_disable_download
GUAC_DISABLE_UPLOAD = var.guac_disable_upload
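    # Settings wrapped in @Microsoft.KeyVault(...) are Key Vault references that App Service
    # resolves at runtime using the user-assigned identity set in key_vault_reference_identity_id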
AUDIENCE = "@Microsoft.KeyVault(SecretUri=${data.azurerm_key_vault_secret.workspace_client_id.id})"
ISSUER = local.issuer
OAUTH2_PROXY_CLIENT_ID = "@Microsoft.KeyVault(SecretUri=${data.azurerm_key_vault_secret.workspace_client_id.id})"
OAUTH2_PROXY_CLIENT_SECRET = "@Microsoft.KeyVault(SecretUri=${data.azurerm_key_vault_secret.workspace_client_secret.id})"
OAUTH2_PROXY_REDIRECT_URI = "https://${local.webapp_name}.${local.webapp_suffix}/oauth2/callback"
OAUTH2_PROXY_EMAIL_DOMAIN = "\"*\"" # oauth proxy will allow all email domains only when the value is "*"
OAUTH2_PROXY_OIDC_ISSUER_URL = local.issuer
OAUTH2_PROXY_JWKS_ENDPOINT = local.jwks_endpoint
}
logs {
application_logs {
file_system_level = "Information"
}
http_logs {
file_system {
retention_in_days = 7
retention_in_mb = 100
}
}
}
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.guacamole_id.id]
}
lifecycle { ignore_changes = [tags] }
depends_on = [
azurerm_role_assignment.guac_acr_pull,
azurerm_key_vault_access_policy.guacamole_policy
]
}
resource "azurerm_monitor_diagnostic_setting" "guacamole" {
name = "diag-${var.tre_id}"
target_resource_id = azurerm_linux_web_app.guacamole.id
log_analytics_workspace_id = data.azurerm_log_analytics_workspace.tre.id
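  # Enable only the log categories that the web app supports and that the local allow-list names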
dynamic "enabled_log" {
for_each = setintersection(data.azurerm_monitor_diagnostic_categories.guacamole.log_category_types, local.guacamole_diagnostic_categories_enabled)
content {
category = enabled_log.value
}
}
metric {
category = "AllMetrics"
enabled = true
}
}
resource "azurerm_role_assignment" "guac_acr_pull" {
scope = data.azurerm_container_registry.mgmt_acr.id
role_definition_name = "AcrPull"
principal_id = azurerm_user_assigned_identity.guacamole_id.principal_id
}
resource "azurerm_private_endpoint" "guacamole" {
# disabling this makes the webapp available on the public internet
count = var.is_exposed_externally == false ? 1 : 0
name = "pe-${local.webapp_name}"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
subnet_id = data.azurerm_subnet.services.id
tags = local.workspace_service_tags
private_service_connection {
private_connection_resource_id = azurerm_linux_web_app.guacamole.id
name = "psc-${local.webapp_name}"
subresource_names = ["sites"]
is_manual_connection = false
}
private_dns_zone_group {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.azurewebsites.net"]
private_dns_zone_ids = [data.azurerm_private_dns_zone.azurewebsites.id]
}
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_access_policy" "guacamole_policy" {
key_vault_id = data.azurerm_key_vault.ws.id
tenant_id = azurerm_user_assigned_identity.guacamole_id.tenant_id
object_id = azurerm_user_assigned_identity.guacamole_id.principal_id
secret_permissions = ["Get", "List", ]
}
|
AzureTRE/templates/workspace_services/guacamole/terraform/web_app.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/terraform/web_app.tf",
"repo_id": "AzureTRE",
"token_count": 2835
}
| 125 |
---
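# Cloud-init apt configuration; ${nexus_proxy_url} is presumably substituted when this file is rendered (e.g. via Terraform's templatefile)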
apt:
preserve_sources_list: false
primary:
- arches:
- default
uri: "${nexus_proxy_url}/repository/ubuntu/"
security:
- arches:
- default
uri: "${nexus_proxy_url}/repository/ubuntu-security/"
sources_list: |
deb [trusted=yes] $PRIMARY $RELEASE main restricted universe multiverse
deb [trusted=yes] $PRIMARY $RELEASE-updates main restricted universe multiverse
deb [trusted=yes] $SECURITY $RELEASE main restricted universe multiverse
deb [signed-by=/etc/apt/trusted.gpg.d/microsoft.gpg] ${nexus_proxy_url}/repository/microsoft-apt/ubuntu/18.04/prod $RELEASE main
deb [signed-by=/etc/apt/trusted.gpg.d/microsoft.gpg] ${nexus_proxy_url}/repository/microsoft-apt/repos/edge stable main
deb [signed-by=/etc/apt/trusted.gpg.d/docker-archive-keyring.gpg] ${nexus_proxy_url}/repository/docker/ $RELEASE stable
|
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/apt_sources_config.yml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/apt_sources_config.yml",
"repo_id": "AzureTRE",
"token_count": 342
}
| 126 |
{
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/healthcare_services/template_schema.json",
"type": "object",
"title": "Azure Health Data Services Workspace Service",
"description": "Provides Azure Health Data Services within the workspace",
"required": [],
"properties": {
"display_name": {
"type": "string",
"title": "Name for the workspace service",
"description": "The name of the workspace service to be displayed to users",
"default": "Azure Health Data Services",
"updateable": true
},
"description": {
"type": "string",
"title": "Description of the workspace service",
"description": "Description of the workspace service",
"default": "Unify and manage health data and protected health information (PHI) in the cloud.",
"updateable": true
},
"deploy_dicom": {
"$id": "#/properties/deploy_dicom",
"type": "boolean",
"title": "Deploy DICOM",
"description": "Deploy DICOM instance",
"updateable": true,
"default": false
},
"deploy_fhir": {
"$id": "#/properties/deploy_fhir",
"type": "boolean",
"title": "Deploy FHIR",
"description": "Deploy FHIR instance",
"updateable": true,
"default": false
}
},
"allOf": [
{
"if": {
"properties": {
"deploy_fhir": {
"const": true
}
},
"required": [
"deploy_fhir"
]
},
"then": {
"properties": {
"fhir_kind": {
"type": "string",
"title": "FHIR version",
"description": "The FHIR version that will be used.",
"default": "R4",
"enum": [
"R4",
"Stu3"
]
}
}
}
}
],
"pipeline": {
"install": [
{
"stepId": "main"
},
{
"stepId": "d5504764-94cd-11ed-a1eb-0242ac120002",
"stepTitle": "Add network firewall rules for health services",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_health_services",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "AAD access",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_health_services",
"action": "Allow",
"rules": [
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"upgrade": [
{
"stepId": "main"
},
{
"stepId": "12c0cf8c-94f7-11ed-a1eb-0242ac120002",
"stepTitle": "Update network firewall rules for health services",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_health_services",
"action": "Allow",
"rules": [
{
"name": "AzureAD",
"description": "AAD access for authNZ",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"destination_addresses": ["AzureActiveDirectory"],
"destination_ports": ["*"],
"protocols": ["TCP"]
}
]
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "replace",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_health_services",
"action": "Allow",
"rules": [
{
"name": "AAD CDN",
"description": "AAD CDN",
"source_addresses": "{{ resource.properties.workspace_address_space }}",
"target_fqdns": [
"aadcdn.msftauth.net"
],
"protocols": [
{
"port": "443",
"type": "Https"
}
]
}
]
}
}
]
}
],
"uninstall": [
{
"stepId": "1fc155ee-94f7-11ed-a1eb-0242ac120002",
"stepTitle": "Remove network firewall rules for health services",
"resourceTemplateName": "tre-shared-service-firewall",
"resourceType": "shared-service",
"resourceAction": "upgrade",
"properties": [
{
"name": "network_rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "nrc_svc_{{ resource.id }}_health_services"
}
},
{
"name": "rule_collections",
"type": "array",
"arraySubstitutionAction": "remove",
"arrayMatchField": "name",
"value": {
"name": "arc_svc_{{ resource.id }}_health_services"
}
}
]
},
{
"stepId": "main"
}
]
}
}
|
AzureTRE/templates/workspace_services/health-services/template_schema.json/0
|
{
"file_path": "AzureTRE/templates/workspace_services/health-services/template_schema.json",
"repo_id": "AzureTRE",
"token_count": 3781
}
| 127 |
---
schemaVersion: 1.0.0
name: tre-service-innereye
version: 0.6.4
description: "An Azure TRE service for InnerEye Deep Learning"
registry: azuretre
dockerfile: Dockerfile.tmpl
credentials:
- name: azure_tenant_id
env: ARM_TENANT_ID
- name: azure_subscription_id
env: ARM_SUBSCRIPTION_ID
- name: azure_client_id
env: ARM_CLIENT_ID
- name: azure_client_secret
env: ARM_CLIENT_SECRET
parameters:
- name: workspace_id
type: string
- name: tre_id
type: string
- name: id
type: string
description: "An Id for this installation"
- name: azure_environment
type: string
default: "AzureCloud"
description: "Used by Azure CLI to set the Azure environment"
- name: inference_sp_client_id
type: string
- name: inference_sp_client_secret
type: string
- name: mgmt_acr_name
type: string
- name: tfstate_resource_group_name
type: string
description: "Resource group containing the Terraform state storage account"
env: MGMT_RESOURCE_GROUP_NAME
- name: tfstate_storage_account_name
type: string
description: "The name of the Terraform state storage account"
env: MGMT_STORAGE_ACCOUNT_NAME
- name: tfstate_container_name
type: string
default: "tfstate"
description: "The name of the Terraform state storage container"
env: TERRAFORM_STATE_CONTAINER_NAME
- name: arm_use_msi
env: ARM_USE_MSI
type: boolean
default: false
- name: arm_environment
env: ARM_ENVIRONMENT
type: string
default: "public"
mixins:
- exec
- az:
clientVersion: 2.37.0
extensions:
- azure-firewall
- terraform:
clientVersion: 1.3.6
install:
- az:
description: "Set Azure Cloud Environment"
arguments:
- cloud
- set
flags:
name: ${ bundle.parameters.azure_environment }
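    # The login step below uses a managed identity; the flags appear to map to `az login --identity --username <client-id>`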
- az:
description: "az login"
arguments:
- login
flags:
identity:
username: ${ bundle.credentials.azure_client_id}
- az:
description: "acr login"
arguments:
- acr
- login
flags:
name: ${ bundle.parameters.mgmt_acr_name }
- exec:
description: "Install Azure ML service"
command: ./install_service_azureml.sh
- terraform:
description: "Deploy service"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
inference_sp_client_id: ${ bundle.parameters.inference_sp_client_id }
inference_sp_client_secret: ${ bundle.parameters.inference_sp_client_secret }
arm_tenant_id: ${ bundle.credentials.azure_tenant_id }
arm_client_id: ${ bundle.credentials.azure_client_id }
arm_client_secret: ${ bundle.credentials.azure_client_secret }
arm_use_msi: ${ bundle.parameters.arm_use_msi }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-service-innereye-${ bundle.parameters.id }
outputs:
- name: azureml_compute_cluster_name
upgrade:
- exec:
description: "Upgrade service"
command: echo
arguments:
- "This workspace does not implement upgrade action"
uninstall:
- terraform:
description: "Tear down workspace"
vars:
workspace_id: ${ bundle.parameters.workspace_id }
tre_id: ${ bundle.parameters.tre_id }
tre_resource_id: ${ bundle.parameters.id }
inference_sp_client_id: ${ bundle.parameters.inference_sp_client_id }
inference_sp_client_secret: ${ bundle.parameters.inference_sp_client_secret }
arm_tenant_id: ${ bundle.credentials.azure_tenant_id }
arm_client_id: ${ bundle.credentials.azure_client_id }
arm_client_secret: ${ bundle.credentials.azure_client_secret }
arm_use_msi: ${ bundle.parameters.arm_use_msi }
arm_environment: ${ bundle.parameters.arm_environment }
backendConfig:
resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
container_name: ${ bundle.parameters.tfstate_container_name }
key: tre-service-innereye-${ bundle.parameters.id }
|
AzureTRE/templates/workspace_services/innereye/porter.yaml/0
|
{
"file_path": "AzureTRE/templates/workspace_services/innereye/porter.yaml",
"repo_id": "AzureTRE",
"token_count": 1827
}
| 128 |
# This is the sshd server system-wide configuration file
#
# /etc/sshd_config
Port 2222
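# Port 2222 (with root login enabled below) matches what Azure App Service expects for SSH into custom containers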
ListenAddress 0.0.0.0
LoginGraceTime 180
X11Forwarding yes
Ciphers aes128-cbc,3des-cbc,aes256-cbc
MACs hmac-sha1,hmac-sha1-96
StrictModes yes
SyslogFacility DAEMON
PrintMotd no
IgnoreRhosts no
RhostsRSAAuthentication yes
RSAAuthentication no
PasswordAuthentication yes
PermitEmptyPasswords no
PermitRootLogin yes
|
AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/sshd_config/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/mlflow-server/docker/sshd_config",
"repo_id": "AzureTRE",
"token_count": 295
}
| 129 |
resource "random_string" "username" {
length = 10
upper = true
lower = true
numeric = false
min_lower = 1
special = false
}
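# The generated password is intended to satisfy Azure Database for PostgreSQL complexity rules
# (at least one each of lower, upper, digit, and special characters)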
resource "random_password" "password" {
length = 16
lower = true
min_lower = 1
upper = true
min_upper = 1
numeric = true
min_numeric = 1
special = true
min_special = 1
override_special = "_%$"
}
resource "azurerm_key_vault_secret" "postgresql_admin_username" {
name = "${local.postgresql_server_name}-admin-username"
value = random_string.username.result
key_vault_id = data.azurerm_key_vault.ws.id
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_secret" "postgresql_admin_password" {
name = "${local.postgresql_server_name}-admin-password"
value = random_password.password.result
key_vault_id = data.azurerm_key_vault.ws.id
tags = local.tre_workspace_service_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_postgresql_server" "mlflow" {
name = local.postgresql_server_name
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
tags = local.tre_workspace_service_tags
administrator_login = random_string.username.result
administrator_login_password = random_password.password.result
sku_name = "GP_Gen5_2"
version = "11"
storage_mb = 5120
backup_retention_days = 7
geo_redundant_backup_enabled = false
auto_grow_enabled = true
public_network_access_enabled = false
ssl_enforcement_enabled = true
ssl_minimal_tls_version_enforced = "TLS1_2"
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_postgresql_database" "mlflow" {
name = "mlflowdb"
resource_group_name = data.azurerm_resource_group.ws.name
server_name = azurerm_postgresql_server.mlflow.name
charset = "UTF8"
collation = "English_United States.1252"
}
resource "azurerm_private_endpoint" "private_endpoint" {
name = "pe-${azurerm_postgresql_server.mlflow.name}-postgres"
location = data.azurerm_resource_group.ws.location
resource_group_name = data.azurerm_resource_group.ws.name
subnet_id = data.azurerm_subnet.services.id
tags = local.tre_workspace_service_tags
private_service_connection {
private_connection_resource_id = azurerm_postgresql_server.mlflow.id
name = "psc-${azurerm_postgresql_server.mlflow.name}"
subresource_names = ["postgresqlServer"]
is_manual_connection = false
}
private_dns_zone_group {
name = module.terraform_azurerm_environment_configuration.private_links["privatelink.postgres.database.azure.com"]
private_dns_zone_ids = [data.azurerm_private_dns_zone.postgres.id]
}
lifecycle { ignore_changes = [tags] }
}
|
AzureTRE/templates/workspace_services/mlflow/terraform/postgresql.tf/0
|
{
"file_path": "AzureTRE/templates/workspace_services/mlflow/terraform/postgresql.tf",
"repo_id": "AzureTRE",
"token_count": 1363
}
| 130 |
# Local .terraform directories
**/.terraform/*
# TF backend files
**/*_backend.tf
Dockerfile.tmpl
|
AzureTRE/templates/workspace_services/ohdsi/.dockerignore/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/.dockerignore",
"repo_id": "AzureTRE",
"token_count": 37
}
| 131 |
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
if [[ -z ${DATA_SOURCE_CONFIG:-} ]] || [[ -z ${DATA_SOURCE_DIAMONS:-} ]]; then
printf 'No data source or daimons configured.'
exit 0
else
# Parse Data source
ds_config="$(echo "$DATA_SOURCE_CONFIG" | base64 --decode)"
ds_daimons="$(echo "$DATA_SOURCE_DIAMONS" | base64 --decode)"
dialect="$(jq -r '.dialect' <<< "$ds_config")"
if [[ $dialect != "Azure Synapse" ]]; then
printf 'Not a Synapse data source, no action required.'
exit 0
fi
origin_results_schema_name="$(jq -r '.daimon_results' <<< "$ds_daimons")"
origin_temp_schema_name="$(jq -r '.daimon_temp' <<< "$ds_daimons")"
if [[ -z $origin_results_schema_name ]] || [[ -z $origin_temp_schema_name ]]; then
printf 'Results and temp schemas are not configured.'
exit 0
fi
# Parse required info
admin_user="$(jq -r '.username' <<< "$ds_config")"
jdbc_connection_string="$(jq -r '.connection_string' <<< "$ds_config")"
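# Extract the server host and database name from the JDBC connection string via bash regex capture groups (BASH_REMATCH)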
synapse_server="$([[ $jdbc_connection_string =~ jdbc:sqlserver://(.*):1433 ]] && echo "${BASH_REMATCH[1]}")"
synapse_db="$([[ $jdbc_connection_string =~ database=(.*)(;user) ]] && echo "${BASH_REMATCH[1]}")"
parsed_resource_id="$(echo "$TRE_RESOURCE_ID" | tr - _ )"
results_schema_name="${origin_results_schema_name}_${parsed_resource_id}"
temp_schema_name="${origin_temp_schema_name}_${parsed_resource_id}"
# Export password as required by sqlcmd tool
# shellcheck disable=SC2155
export SQLCMDPASSWORD="$(jq -r '.password' <<< "$ds_config")"
printf 'Execute Synapse SQL script'
sqlcmd -U "${admin_user}" -S "${synapse_server}" -d "${synapse_db}" -W -v RESULTS_SCHEMA_NAME="${results_schema_name}" -v TEMP_SCHEMA_NAME="${temp_schema_name}" -v ORIGIN_RESULTS_SCHEMA_NAME="${origin_results_schema_name}" -i "${SCRIPT_PATH}"
printf 'Execute Synapse SQL script: done.'
exit 0
fi
|
AzureTRE/templates/workspace_services/ohdsi/synapse_runner.sh/0
|
{
"file_path": "AzureTRE/templates/workspace_services/ohdsi/synapse_runner.sh",
"repo_id": "AzureTRE",
"token_count": 809
}
| 132 |
# Workspace Templates
Workspace Templates are located in this folder.
To customize or author new Workspace Templates, see [Authoring Workspace Templates](../../docs/tre-workspace-authors/authoring-workspace-templates.md).
|
AzureTRE/templates/workspaces/README.md/0
|
{
"file_path": "AzureTRE/templates/workspaces/README.md",
"repo_id": "AzureTRE",
"token_count": 61
}
| 133 |
data "azuread_client_config" "current" {}
resource "random_uuid" "oauth2_user_impersonation_id" {}
resource "random_uuid" "app_role_workspace_owner_id" {}
resource "random_uuid" "app_role_workspace_researcher_id" {}
resource "random_uuid" "app_role_workspace_airlock_manager_id" {}
resource "azuread_application" "workspace" {
display_name = var.workspace_resource_name_suffix
identifier_uris = ["api://${var.workspace_resource_name_suffix}"]
owners = [data.azuread_client_config.current.object_id]
api {
mapped_claims_enabled = true
requested_access_token_version = 2
oauth2_permission_scope {
admin_consent_description = "Allow the app to access the Workspace API on behalf of the signed-in user."
admin_consent_display_name = "Access the Workspace API on behalf of signed-in user"
enabled = true
id = random_uuid.oauth2_user_impersonation_id.result
type = "User"
user_consent_description = "Allow the app to access the Workspace API on your behalf."
user_consent_display_name = "Access the Workspace API"
value = "user_impersonation"
}
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Provides workspace owners access to the Workspace."
display_name = "Workspace Owner"
enabled = true
id = random_uuid.app_role_workspace_owner_id.result
value = "WorkspaceOwner"
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Provides researchers access to the Workspace."
display_name = "Workspace Researcher"
enabled = true
id = random_uuid.app_role_workspace_researcher_id.result
value = "WorkspaceResearcher"
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Provides airlock managers access to the Workspace and ability to review airlock requests."
display_name = "Airlock Manager"
enabled = true
id = random_uuid.app_role_workspace_airlock_manager_id.result
value = "AirlockManager"
}
feature_tags {
enterprise = true
}
optional_claims {
id_token {
name = "ipaddr"
essential = false
}
id_token {
name = "email"
essential = false
}
}
required_resource_access {
resource_app_id = "00000003-0000-0000-c000-000000000000" # Microsoft Graph
resource_access {
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0" # Email
type = "Scope" # Delegated
}
resource_access {
id = "37f7f235-527c-4136-accd-4a02d197296e" # Openid
type = "Scope" # Delegated
}
resource_access {
id = "14dad69e-099b-42c9-810b-d002981feec1" # Profile
type = "Scope" # Delegated
}
}
web {
redirect_uris = jsondecode(base64decode(var.aad_redirect_uris_b64))[*].value
}
}
resource "azuread_service_principal" "workspace" {
application_id = azuread_application.workspace.application_id
app_role_assignment_required = false
owners = [data.azuread_client_config.current.object_id, var.workspace_owner_object_id]
feature_tags {
enterprise = true
}
}
resource "azuread_service_principal_password" "workspace" {
service_principal_id = azuread_service_principal.workspace.object_id
}
resource "azurerm_key_vault_secret" "client_id" {
name = "workspace-client-id"
value = azuread_application.workspace.application_id
key_vault_id = var.key_vault_id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_key_vault_secret" "client_secret" {
name = "workspace-client-secret"
value = azuread_service_principal_password.workspace.value
key_vault_id = var.key_vault_id
tags = var.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azuread_app_role_assignment" "workspace_owner" {
app_role_id = azuread_service_principal.workspace.app_role_ids["WorkspaceOwner"]
principal_object_id = var.workspace_owner_object_id
resource_object_id = azuread_service_principal.workspace.object_id
}
resource "azuread_group" "workspace_owners" {
count = var.create_aad_groups ? 1 : 0
display_name = "${var.workspace_resource_name_suffix} Workspace Owners"
owners = [var.workspace_owner_object_id]
security_enabled = true
}
resource "azuread_group" "workspace_researchers" {
count = var.create_aad_groups ? 1 : 0
display_name = "${var.workspace_resource_name_suffix} Workspace Researchers"
owners = [var.workspace_owner_object_id]
security_enabled = true
}
resource "azuread_group" "workspace_airlock_managers" {
count = var.create_aad_groups ? 1 : 0
display_name = "${var.workspace_resource_name_suffix} Airlock Managers"
owners = [var.workspace_owner_object_id]
security_enabled = true
}
resource "azuread_group_member" "workspace_owner" {
count = var.create_aad_groups ? 1 : 0
group_object_id = azuread_group.workspace_owners[count.index].id
member_object_id = var.workspace_owner_object_id
}
resource "azuread_app_role_assignment" "workspace_owners_group" {
count = var.create_aad_groups ? 1 : 0
app_role_id = azuread_service_principal.workspace.app_role_ids["WorkspaceOwner"]
principal_object_id = azuread_group.workspace_owners[count.index].id
resource_object_id = azuread_service_principal.workspace.object_id
}
resource "azuread_app_role_assignment" "workspace_researchers_group" {
count = var.create_aad_groups ? 1 : 0
app_role_id = azuread_service_principal.workspace.app_role_ids["WorkspaceResearcher"]
principal_object_id = azuread_group.workspace_researchers[count.index].id
resource_object_id = azuread_service_principal.workspace.object_id
}
resource "azuread_app_role_assignment" "workspace_airlock_managers_group" {
count = var.create_aad_groups ? 1 : 0
app_role_id = azuread_service_principal.workspace.app_role_ids["AirlockManager"]
principal_object_id = azuread_group.workspace_airlock_managers[count.index].id
resource_object_id = azuread_service_principal.workspace.object_id
}
|
AzureTRE/templates/workspaces/base/terraform/aad/aad.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/aad/aad.tf",
"repo_id": "AzureTRE",
"token_count": 2884
}
| 134 |
variable "tre_id" {
type = string
}
variable "location" {
type = string
}
variable "resource_group_name" {
type = string
}
variable "resource_group_id" {
type = string
}
variable "tre_workspace_tags" {
type = map(string)
}
variable "workspace_subnet_id" {
type = string
}
variable "azure_monitor_dns_zone_id" {
type = string
}
variable "azure_monitor_oms_opinsights_dns_zone_id" {
type = string
}
variable "azure_monitor_ods_opinsights_dns_zone_id" {
type = string
}
variable "azure_monitor_agentsvc_dns_zone_id" {
type = string
}
variable "blob_core_dns_zone_id" {
type = string
}
variable "tre_resource_id" {
type = string
}
variable "enable_local_debugging" {
type = bool
}
|
AzureTRE/templates/workspaces/base/terraform/azure-monitor/variables.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/azure-monitor/variables.tf",
"repo_id": "AzureTRE",
"token_count": 267
}
| 135 |
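
A hedged sketch of a matching module invocation that supplies every variable declared above. The right-hand-side values (the parent's resource group and a hypothetical network module's outputs) are illustrative assumptions, not taken from the repo.

module "azure_monitor" {
  source                                   = "./azure-monitor"                 # assumed module path
  tre_id                                   = var.tre_id
  location                                 = var.location
  resource_group_name                      = azurerm_resource_group.ws.name    # hypothetical parent resource
  resource_group_id                        = azurerm_resource_group.ws.id
  tre_workspace_tags                       = local.tre_workspace_tags
  workspace_subnet_id                      = module.network.services_subnet_id # hypothetical network outputs follow
  azure_monitor_dns_zone_id                = module.network.azure_monitor_dns_zone_id
  azure_monitor_oms_opinsights_dns_zone_id = module.network.azure_monitor_oms_opinsights_dns_zone_id
  azure_monitor_ods_opinsights_dns_zone_id = module.network.azure_monitor_ods_opinsights_dns_zone_id
  azure_monitor_agentsvc_dns_zone_id       = module.network.azure_monitor_agentsvc_dns_zone_id
  blob_core_dns_zone_id                    = module.network.blobcore_zone_id
  tre_resource_id                          = var.tre_resource_id
  enable_local_debugging                   = var.enable_local_debugging
}
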
resource "azurerm_storage_account" "stg" {
name = local.storage_name
resource_group_name = azurerm_resource_group.ws.name
location = azurerm_resource_group.ws.location
account_tier = "Standard"
account_replication_type = "GRS"
allow_nested_items_to_be_public = false
is_hns_enabled = true
tags = local.tre_workspace_tags
lifecycle { ignore_changes = [tags] }
}
resource "azurerm_storage_share" "shared_storage" {
name = "vm-shared-storage"
storage_account_name = azurerm_storage_account.stg.name
quota = var.shared_storage_quota
depends_on = [
azurerm_private_endpoint.stgfilepe,
azurerm_storage_account_network_rules.stgrules
]
}
resource "azurerm_storage_container" "stgcontainer" {
name = "datalake"
storage_account_name = azurerm_storage_account.stg.name
container_access_type = "private"
depends_on = [
azurerm_private_endpoint.stgblobpe,
azurerm_storage_account_network_rules.stgrules
]
}
resource "azurerm_storage_account_network_rules" "stgrules" {
storage_account_id = azurerm_storage_account.stg.id
# When deploying from a local machine we need to "allow"
default_action = var.enable_local_debugging ? "Allow" : "Deny"
bypass = ["AzureServices"]
}
resource "azurerm_private_endpoint" "stgfilepe" {
name = "stgfilepe-${local.workspace_resource_name_suffix}"
location = azurerm_resource_group.ws.location
resource_group_name = azurerm_resource_group.ws.name
subnet_id = module.network.services_subnet_id
tags = local.tre_workspace_tags
depends_on = [
module.network
]
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "private-dns-zone-group"
private_dns_zone_ids = [module.network.filecore_zone_id]
}
private_service_connection {
name = "stgfilepesc-${local.workspace_resource_name_suffix}"
private_connection_resource_id = azurerm_storage_account.stg.id
is_manual_connection = false
subresource_names = ["File"]
}
}
resource "azurerm_private_endpoint" "stgblobpe" {
name = "stgblobpe-${local.workspace_resource_name_suffix}"
location = azurerm_resource_group.ws.location
resource_group_name = azurerm_resource_group.ws.name
subnet_id = module.network.services_subnet_id
tags = local.tre_workspace_tags
depends_on = [
module.network,
azurerm_private_endpoint.stgfilepe
]
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "private-dns-zone-group"
private_dns_zone_ids = [module.network.blobcore_zone_id]
}
private_service_connection {
name = "stgblobpesc-${local.workspace_resource_name_suffix}"
private_connection_resource_id = azurerm_storage_account.stg.id
is_manual_connection = false
subresource_names = ["Blob"]
}
}
resource "azurerm_private_endpoint" "stgdfspe" {
name = "stgdfspe-${local.workspace_resource_name_suffix}"
location = azurerm_resource_group.ws.location
resource_group_name = azurerm_resource_group.ws.name
subnet_id = module.network.services_subnet_id
tags = local.tre_workspace_tags
depends_on = [
module.network,
azurerm_private_endpoint.stgblobpe
]
lifecycle { ignore_changes = [tags] }
private_dns_zone_group {
name = "private-dns-zone-group"
private_dns_zone_ids = [module.network.dfscore_zone_id]
}
private_service_connection {
name = "stgdfspesc-${local.workspace_resource_name_suffix}"
private_connection_resource_id = azurerm_storage_account.stg.id
is_manual_connection = false
subresource_names = ["dfs"]
}
}
|
AzureTRE/templates/workspaces/base/terraform/storage.tf/0
|
{
"file_path": "AzureTRE/templates/workspaces/base/terraform/storage.tf",
"repo_id": "AzureTRE",
"token_count": 1857
}
| 136 |
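
The file above leans on several locals (storage_name, tre_workspace_tags, workspace_resource_name_suffix) defined elsewhere in the template. A hedged sketch of plausible definitions, assuming the usual TRE naming scheme; the real locals.tf may differ:

locals {
  # Assumed: last four characters of the workspace resource ID
  short_workspace_id             = substr(var.tre_resource_id, length(var.tre_resource_id) - 4, 4)
  workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
  # Storage account names must be lowercase alphanumeric, hence the cleanup
  storage_name                   = lower(replace("stgws${local.short_workspace_id}", "-", ""))
  tre_workspace_tags = {
    tre_id           = var.tre_id
    tre_workspace_id = var.tre_resource_id
  }
}
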
{
"name": "tre-ui",
"version": "0.5.22",
"private": true,
"dependencies": {
"@azure/msal-browser": "^2.35.0",
"@azure/msal-react": "^1.5.12",
"@fluentui/react": "^8.114.1",
"@fluentui/react-file-type-icons": "^8.7.9",
"@reduxjs/toolkit": "^1.8.6",
"@rjsf/core": "^4.2.3",
"@rjsf/fluent-ui": "^4.2.3",
"@testing-library/jest-dom": "^6.2.0",
"@testing-library/react": "^14.0.0",
"@testing-library/user-event": "^14.4.3",
"@testing-library/dom": "^7.21.4",
"@types/jest": "^29.5.0",
"@types/node": "^20.4.2",
"@types/react": "^18.0.25",
"@types/react-dom": "^18.2.6",
"moment": "^2.29.4",
"node-sass": "^8.0.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^8.0.3",
"react-redux": "^8.0.4",
"react-router-dom": "6.21.1",
"remark-gfm": "^3.0.1",
"typescript": "^5.3.3",
"web-vitals": "^3.3.0"
},
"devDependencies": {
"react-scripts": "5.0.1",
"@babel/core": "^7.23.7",
"@babel/plugin-syntax-flow" : "^7.23.3",
"@babel/plugin-transform-react-jsx": "^7.23.4",
"@babel/plugin-proposal-private-property-in-object": "^7.21.11"
},
"scripts": {
"start": "react-scripts start",
"build": "react-scripts build",
"test": "react-scripts test",
"eject": "react-scripts eject"
},
"eslintConfig": {
"extends": [
"react-app",
"react-app/jest"
]
},
"browserslist": {
"production": [
">0.2%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"resolutions": {
"nth-check": "^2.0.1",
"node-sass/**/ip": "^2.0.1"
}
}
|
AzureTRE/ui/app/package.json/0
|
{
"file_path": "AzureTRE/ui/app/package.json",
"repo_id": "AzureTRE",
"token_count": 915
}
| 137 |
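
The resolutions block above is a Yarn-specific mechanism for forcing patched versions of transitive dependencies (here, nth-check and the ip package pulled in by node-sass). A hedged sketch of the npm (v8.3+) equivalent using overrides, if the project were ever installed with npm instead of Yarn:

{
  "overrides": {
    "nth-check": "^2.0.1",
    "node-sass": {
      "ip": "^2.0.1"
    }
  }
}
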
import { Dialog, DialogFooter, PrimaryButton, DefaultButton, DialogType, Spinner } from '@fluentui/react';
import React, { useContext, useState } from 'react';
import { Resource } from '../../models/resource';
import { HttpMethod, ResultType, useAuthApiCall } from '../../hooks/useAuthApiCall';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { ResourceType } from '../../models/resourceType';
import { LoadingState } from '../../models/loadingState';
import { APIError } from '../../models/exceptions';
import { ExceptionLayout } from './ExceptionLayout';
import { useAppDispatch } from '../../hooks/customReduxHooks';
import { addUpdateOperation } from '../shared/notifications/operationsSlice';
interface ConfirmDisableEnableResourceProps {
resource: Resource,
isEnabled: boolean,
onDismiss: () => void
}
// Show an 'Are you sure?' confirmation dialog, then send a PATCH if the user confirms.
export const ConfirmDisableEnableResource: React.FunctionComponent<ConfirmDisableEnableResourceProps> = (props: ConfirmDisableEnableResourceProps) => {
const apiCall = useAuthApiCall();
const [loading, setLoading] = useState(LoadingState.Ok);
const [apiError, setApiError] = useState({} as APIError);
const workspaceCtx = useContext(WorkspaceContext);
const dispatch = useAppDispatch();
const disableProps = {
type: DialogType.normal,
title: 'Disable Resource?',
closeButtonAriaLabel: 'Close',
subText: `Are you sure you want to disable ${props.resource.properties.display_name}?`,
};
const enableProps = {
type: DialogType.normal,
title: 'Enable Resource?',
closeButtonAriaLabel: 'Close',
subText: `Are you sure you want to enable ${props.resource.properties.display_name}?`,
};
const dialogStyles = { main: { maxWidth: 450 } };
const modalProps = {
titleAriaId: 'labelId',
subtitleAriaId: 'subTextId',
isBlocking: true,
styles: dialogStyles
};
const wsAuth = (props.resource.resourceType === ResourceType.WorkspaceService || props.resource.resourceType === ResourceType.UserResource);
const toggleDisableCall = async () => {
setLoading(LoadingState.Loading);
try {
      const body = { isEnabled: props.isEnabled };
      const op = await apiCall(props.resource.resourcePath, HttpMethod.Patch, wsAuth ? workspaceCtx.workspaceApplicationIdURI : undefined, body, ResultType.JSON, undefined, undefined, props.resource._etag);
dispatch(addUpdateOperation(op.operation));
props.onDismiss();
} catch (err: any) {
err.userMessage = 'Failed to enable/disable resource';
setApiError(err);
setLoading(LoadingState.Error);
}
}
return (
<>
<Dialog
hidden={false}
onDismiss={() => props.onDismiss()}
dialogContentProps={props.isEnabled ? enableProps : disableProps}
modalProps={modalProps}
>
{
loading === LoadingState.Ok &&
<DialogFooter>
{props.isEnabled ?
<PrimaryButton text="Enable" onClick={() => toggleDisableCall()} />
:
<PrimaryButton text="Disable" onClick={() => toggleDisableCall()} />
}
<DefaultButton text="Cancel" onClick={() => props.onDismiss()} />
</DialogFooter>
}
{
loading === LoadingState.Loading &&
<Spinner label="Sending request..." ariaLive="assertive" labelPosition="right" />
}
{
loading === LoadingState.Error &&
<ExceptionLayout e={apiError} />
}
</Dialog>
</>);
};
|
AzureTRE/ui/app/src/components/shared/ConfirmDisableEnableResource.tsx/0
|
{
"file_path": "AzureTRE/ui/app/src/components/shared/ConfirmDisableEnableResource.tsx",
"repo_id": "AzureTRE",
"token_count": 1293
}
| 138 |
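
A hedged usage sketch for the component above: a toggle button that opens the dialog. The parent component, its import paths, and the Resource.isEnabled field are assumptions; note that the isEnabled prop is the *target* state, not the current one.

import React, { useState } from 'react';
import { DefaultButton } from '@fluentui/react';
import { Resource } from '../../models/resource';
import { ConfirmDisableEnableResource } from './ConfirmDisableEnableResource';

// Hypothetical parent component, not part of the repo.
export const ResourceToggle: React.FunctionComponent<{ resource: Resource }> = ({ resource }) => {
  const [showConfirm, setShowConfirm] = useState(false);

  // Passing true renders the "Enable Resource?" dialog and PATCHes { isEnabled: true },
  // so the prop carries the desired state after the toggle.
  const targetState = !resource.isEnabled; // assumes the Resource model exposes isEnabled

  return (
    <>
      <DefaultButton text={targetState ? 'Enable' : 'Disable'} onClick={() => setShowConfirm(true)} />
      {showConfirm &&
        <ConfirmDisableEnableResource
          resource={resource}
          isEnabled={targetState}
          onDismiss={() => setShowConfirm(false)}
        />
      }
    </>
  );
};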