Dataset columns: repo_id (string, 15-86 chars), file_path (string, 28-180 chars), content (string, 1-1.75M chars), __index_level_0__ (int64, all 0). Each row below gives the repo_id, the file_path, the file's content, and the index value.
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/swin2sr/test_image_processing_swin2sr.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor from transformers.image_transforms import get_image_size class Swin2SRImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size def prepare_image_processor_dict(self): return { "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "pad_size": self.pad_size, } def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: image_inputs = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 ) ) else: image_inputs = [] for i in range(self.batch_size): width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2) image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] if torchify: image_inputs = [torch.from_numpy(x) for x in image_inputs] return image_inputs @require_torch @require_vision class Swin2SRImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = Swin2SRImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = Swin2SRImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_rescale")) self.assertTrue(hasattr(image_processor, "rescale_factor")) self.assertTrue(hasattr(image_processor, "do_pad")) self.assertTrue(hasattr(image_processor, "pad_size")) def test_batch_feature(self): pass def calculate_expected_size(self, image): old_height, old_width = get_image_size(image) size = self.image_processor_tester.pad_size pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return old_height + pad_height, old_width + pad_width def test_call_pil(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(np.array(image_inputs[0])) self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(image_inputs[0]) self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processor(image_inputs[0], 
return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(image_inputs[0]) self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
0
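The padding check in calculate_expected_size above rounds each spatial dimension up to the next multiple of pad_size. A minimal standalone sketch of that arithmetic; the helper name and example sizes are illustrative and not part of the test file:

def expected_padded_size(height, width, pad_size=8):
    # Same formula as calculate_expected_size: (old // size + 1) * size
    # always rounds up to the next strictly larger multiple of pad_size.
    pad_height = (height // pad_size + 1) * pad_size - height
    pad_width = (width // pad_size + 1) * pad_size - width
    return height + pad_height, width + pad_width

print(expected_padded_size(30, 45))  # (32, 48)
print(expected_padded_size(32, 32))  # (40, 40): already-aligned sizes still grow by pad_size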
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/swin2sr/test_modeling_swin2sr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Swin2SR model. """ import inspect import unittest from transformers import Swin2SRConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Swin2SRForImageSuperResolution, Swin2SRModel from transformers.models.swin2sr.modeling_swin2sr import SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor class Swin2SRModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=1, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=False, upscale=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.upscale = upscale # here we set some attributes to make tests pass self.num_hidden_layers = len(depths) self.hidden_size = embed_dim self.seq_length = (image_size // patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Swin2SRConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, 
hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, upscale=self.upscale, ) def create_and_check_model(self, config, pixel_values, labels): model = Swin2SRModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.embed_dim, self.image_size, self.image_size) ) def create_and_check_for_image_super_resolution(self, config, pixel_values, labels): model = Swin2SRForImageSuperResolution(config) model.to(torch_device) model.eval() result = model(pixel_values) expected_image_size = self.image_size * self.upscale self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, expected_image_size, expected_image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": Swin2SRModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Swin2SRModelTester(self) self.config_tester = ConfigTester(self, config_class=Swin2SRConfig, embed_dim=37) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_super_resolution(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Swin2SR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training_gradient_checkpointing(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = 
[*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Swin2SRModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwriting because of `logit_scale` parameter def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "logit_scale" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) @require_vision @require_torch @slow class Swin2SRModelIntegrationTest(unittest.TestCase): def test_inference_image_super_resolution_head(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64").to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5458, 0.5546, 0.5638], [0.5526, 0.5565, 0.5651], [0.5396, 0.5426, 0.5621]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4))
0
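create_and_check_for_image_super_resolution above asserts only on the output shape: Swin2SR scales both spatial dimensions by the configured upscale factor. A small sketch of that expectation using the tester defaults; the helper is illustrative:

def expected_reconstruction_shape(batch_size, num_channels, image_size, upscale):
    # Shape asserted on result.reconstruction in the tester above.
    return (batch_size, num_channels, image_size * upscale, image_size * upscale)

# Tester defaults: batch_size=13, num_channels=3, image_size=32, upscale=2.
assert expected_reconstruction_shape(13, 3, 32, 2) == (13, 3, 64, 64)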
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/convbert/test_modeling_tf_convbert.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class TFConvBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 384 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.embedding_size = 128 self.head_ratio = 2 self.conv_kernel_size = 9 self.num_groups = 1 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, 
initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFConvBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFConvBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFConvBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFConvBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, 
inputs_dict @require_tf class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) if self.is_encoder_decoder: output_hidden_states = outputs["encoder_hidden_states"] output_attentions = outputs["encoder_attentions"] else: output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow def test_model_from_pretrained(self): model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base") self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(out_len % 2, 0) decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) @require_tf class TFConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 768] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
0
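create_and_check_for_multiple_choice above builds its inputs by repeating each (batch, seq_length) tensor along a new num_choices axis. A self-contained sketch of that expansion, assuming small illustrative sizes:

import tensorflow as tf

batch_size, seq_length, num_choices = 2, 7, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

# tf.expand_dims adds the choices axis and tf.tile repeats the sequence per
# choice, mirroring the expansion in create_and_check_for_multiple_choice.
multiple_choice_input_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(multiple_choice_input_ids.shape)  # (2, 4, 7)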
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/convbert/test_modeling_convbert.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ConvBERT model. """ import os import tempfile import unittest from transformers import ConvBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.convbert.modeling_convbert import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST class ConvBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels def get_config(self): return ConvBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ConvBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ConvBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = ConvBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConvBertModel, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ConvBertModel, "fill-mask": ConvBertForMaskedLM, "question-answering": ConvBertForQuestionAnswering, "text-classification": ConvBertForSequenceClassification, "token-classification": ConvBertForTokenClassification, "zero-shot": ConvBertForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_head_masking = False def setUp(self): self.model_tester = ConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ConvBertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) 
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) @slow @require_torch_gpu def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # ConvBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == ConvBertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) def test_model_for_input_embeds(self): batch_size = 2 seq_length = 10 inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device) config = self.model_tester.get_config() model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(inputs_embeds=inputs_embeds) self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size)) def test_reducing_attention_heads(self): config, *inputs_dict = self.model_tester.prepare_config_and_inputs() config.head_ratio = 4 self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict) @require_torch class ConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = ConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
0
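The attention-shape assertions above compare against num_attention_heads / 2 because ConvBERT splits its heads between self-attention and the span-based dynamic convolution according to head_ratio (2 by default). A hedged sketch of that relationship; the helper is illustrative and the integer division is an assumption about how the effective head count is derived:

def effective_attention_heads(num_attention_heads, head_ratio=2):
    # Heads left for plain self-attention once head_ratio is applied; with the
    # tester's 4 heads and the default head_ratio of 2 this is 2, matching the
    # num_attention_heads / 2 used in the shape checks above.
    return num_attention_heads // head_ratio

assert effective_attention_heads(4) == 2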
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilevit/test_modeling_mobilevit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileViT model. """ import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class MobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class MobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=32, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, hidden_sizes=[12, 16, 20], neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32], ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileViTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_post_processing_semantic_segmentation(self): model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") model = model.to(torch_device) image_processor = 
MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) expected_shape = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape, expected_shape)
0
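The hidden-states check in the MobileViT test above rests on a simple size progression: each of the five reported feature maps halves the spatial resolution of the previous one, and the final divisor must equal the model's output stride. A minimal, self-contained sketch of that arithmetic (the image_size and output_stride values below are assumptions chosen for illustration, not the tester's actual defaults):

# Illustrative sketch of the divisor arithmetic used in test_hidden_states_output above.
# image_size and output_stride are example values, not taken from the tester.
image_size = 32
output_stride = 32
num_stages = 5

divisor = 2
expected_shapes = []
for _ in range(num_stages):
    # every MobileViT stage halves the height and width of the feature map
    expected_shapes.append((image_size // divisor, image_size // divisor))
    divisor *= 2

print(expected_shapes)  # [(16, 16), (8, 8), (4, 4), (2, 2), (1, 1)]
# the deepest feature map corresponds to the model's output stride
assert output_stride == divisor // 2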
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilevit/test_modeling_tf_mobilevit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow MobileViT model. """ from __future__ import annotations import inspect import unittest from transformers import MobileViTConfig from transformers.file_utils import is_tf_available, is_vision_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel from transformers.models.mobilevit.modeling_tf_mobilevit import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class TFMobileViTConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) class TFMobileViTModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=32, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.last_hidden_size = last_hidden_size self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.conv_kernel_size = conv_kernel_size self.output_stride = output_stride self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, 
output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, hidden_sizes=[12, 16, 20], neck_hidden_sizes=[8, 8, 16, 16, 32, 32, 32], ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = TFMobileViTModel(config=config) result = model(pixel_values, training=False) expected_height = expected_width = self.image_size // self.output_stride self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.last_hidden_size, expected_height, expected_width) ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFMobileViTForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFMobileViTForSemanticSegmentation(config) expected_height = expected_width = self.image_size // self.output_stride result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) ) result = model(pixel_values, labels=pixel_labels, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFMobileViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (TFMobileViTModel, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFMobileViTModel, "image-classification": TFMobileViTForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_onnx = False def setUp(self): self.model_tester = TFMobileViTModelTester(self) self.config_tester = TFMobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileViT does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileViT does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 5 self.assertEqual(len(hidden_states), expected_num_stages) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
divisor = 2 for i in range(len(hidden_states)): self.assertListEqual( list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], ) divisor *= 2 self.assertEqual(self.model_tester.output_stride, divisor // 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # `TFMobileViTModel` cannot operate with the default `fit()` method, so it is skipped here. if model_class.__name__ != "TFMobileViTModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): super().test_keras_fit() # The default test_loss_computation() uses -100 as a proxy ignore_index # to test masked losses. Overriding to avoid -100 since semantic segmentation # models use `semantic_loss_ignore_index` from the config. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # set an ignore index to correctly test the masked loss used in # `TFMobileViTForSemanticSegmentation`.
if model_class.__name__ != "TFMobileViTForSemanticSegmentation": config.semantic_loss_ignore_index = 5 model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] expected_loss_size = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: # labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Get keys that were added with the _prepare_for_class function label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) @slow def test_model_from_pretrained(self): for model_name in TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFMobileViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFMobileViTModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification_head(self): model = TFMobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small") image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs, training=False) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-1.9364, -1.2327, -0.4653]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4, rtol=1e-04) @slow def test_inference_semantic_segmentation(self): # `from_pt` will be removed model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(inputs.pixel_values, training=False) logits = outputs.logits # verify the logits expected_shape = tf.TensorShape((1, 21, 32, 32)) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.constant( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] ) tf.debugging.assert_near(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
0
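The TensorFlow variant above skips `test_dataset_conversion` and `test_keras_fit` unless a GPU is visible, because TensorFlow cannot back-propagate through grouped convolutions on CPU. A small sketch of that gating pattern in isolation (the test class and method names here are placeholders, not part of the suite):

import unittest

import tensorflow as tf


def gpu_is_available() -> bool:
    # True when TensorFlow can see at least one GPU device
    return len(tf.config.list_physical_devices("GPU")) > 0


class GroupedConvTrainingTest(unittest.TestCase):
    @unittest.skipIf(not gpu_is_available(), reason="TF does not support backprop for grouped convolutions on CPU.")
    def test_training_step(self):
        # placeholder body; a real test would build the model and call fit() here,
        # as TFMobileViTModelTest.test_keras_fit does above
        self.assertTrue(gpu_is_available())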
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class MobileViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileViTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = MobileViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
0
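The image-processor test above asserts one behaviour: regardless of the input resolution, a shortest-edge resize followed by a centre crop produces tensors of crop_size. A hedged usage sketch with the same configuration values as MobileViTImageProcessingTester (requires torch, Pillow and the vision extras; the input resolution is arbitrary):

import numpy as np
from PIL import Image

from transformers import MobileViTImageProcessor

# same settings as MobileViTImageProcessingTester above
image_processor = MobileViTImageProcessor(
    do_resize=True,
    size={"shortest_edge": 20},
    do_center_crop=True,
    crop_size={"height": 18, "width": 18},
    do_flip_channel_order=True,
)

# any RGB image, whatever its resolution
image = Image.fromarray(np.random.randint(0, 255, size=(45, 67, 3), dtype=np.uint8))

pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
# resized so the shortest edge is 20, then centre-cropped to 18x18
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])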
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert/test_modeling_bert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import BertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, logging, ) from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST class BertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return 
config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def 
create_and_check_model_for_causal_lm_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = BertLMHeadModel(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) 
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = BertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BertModel, BertLMHeadModel, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BertModel, "fill-mask": BertForMaskedLM, "question-answering": BertForQuestionAnswering, "text-classification": BertForSequenceClassification, "text-generation": BertLMHeadModel, "token-classification": BertForTokenClassification, "zero-shot": BertForSequenceClassification, } if is_torch_available() else {} ) 
fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def 
test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_warning_if_padding_and_no_attention_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.model_tester.prepare_config_and_inputs() # Set pad tokens in the input_ids input_ids[0, 0] = config.pad_token_id # Check for warnings if the attention_mask is missing. logger = logging.get_logger("transformers.modeling_utils") # clear cache so we can test the warning is emitted (from `warning_once`). logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: model = BertModel(config=config) model.to(torch_device) model.eval() model(input_ids, attention_mask=None, token_type_ids=token_type_ids) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = BertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == BertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "bert.pt")) loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class BertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertModel.from_pretrained("bert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.0756, 0.3142, -0.5128], [0.3761, 
0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key_query(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
0
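create_and_check_decoder_model_past_large_inputs in the BERT tests above checks that decoding with a key/value cache reproduces a full forward pass over the concatenated sequence. A condensed, self-contained sketch of the same equivalence (the tiny configuration values are chosen only to keep the example fast and are not the tester's defaults):

import torch

from transformers import BertConfig, BertLMHeadModel

# tiny decoder configuration; values are illustrative only
config = BertConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    is_decoder=True,
)
model = BertLMHeadModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))

with torch.no_grad():
    # cache the keys/values of the prefix, then feed only the new tokens
    past_key_values = model(input_ids, use_cache=True).past_key_values
    logits_from_cache = model(next_tokens, past_key_values=past_key_values).logits

    # reference: one full forward pass over the concatenated sequence
    full_logits = model(torch.cat([input_ids, next_tokens], dim=-1)).logits

# the cached run must reproduce the last three positions of the full run
assert torch.allclose(logits_from_cache, full_logits[:, -3:], atol=1e-3)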
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert/test_modeling_flax_bert.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class FlaxBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = 
{"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxBertModelTester(self) @slow def test_model_from_pretrained(self): # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. model = FlaxBertModel.from_pretrained("bert-base-cased") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert/test_tokenization_bert_tf.py
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"] TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class ModelToSave(tf.keras.Model): def __init__(self, tokenizer): super().__init__() self.tokenizer = tokenizer config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT) self.bert = TFAutoModel.from_config(config) def call(self, inputs): tokenized = self.tokenizer(inputs) out = self.bert(**tokenized) return out["pooler_output"] @require_tf @require_tensorflow_text class BertTokenizationTest(unittest.TestCase): # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints, # so that's what we focus on here. def setUp(self): super().setUp() self.tokenizers = [ BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) self.test_sentences = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1])) def test_output_equivalence(self): for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest") tf_outputs = tf_tokenizer(test_inputs) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key])) @slow def test_different_pairing_styles(self): for tf_tokenizer in self.tf_tokenizers: merged_outputs = tf_tokenizer(self.paired_sentences) separated_outputs = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key])) @slow def test_graph_mode(self): for tf_tokenizer in self.tf_tokenizers: compiled_tokenizer = tf.function(tf_tokenizer) for test_inputs in (self.test_sentences, self.paired_sentences): test_inputs = tf.constant(test_inputs) compiled_outputs = compiled_tokenizer(test_inputs) eager_outputs = tf_tokenizer(test_inputs) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def test_saved_model(self): for tf_tokenizer in self.tf_tokenizers: model = 
ModelToSave(tokenizer=tf_tokenizer) test_inputs = tf.convert_to_tensor(self.test_sentences) out = model(test_inputs) # Build model with some sample inputs with TemporaryDirectory() as tempdir: save_path = Path(tempdir) / "saved.model" model.save(save_path) loaded_model = tf.keras.models.load_model(save_path) loaded_output = loaded_model(test_inputs) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
0
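The point of the TF tokenizer tests above is that TFBertTokenizer tokenises inside the TensorFlow graph, so it can be compiled with tf.function or saved as part of a Keras model. A brief usage sketch along the lines of test_graph_mode (requires tensorflow and tensorflow-text; the checkpoint name is one of those used by the tests):

import tensorflow as tf

from transformers.models.bert import TFBertTokenizer

# in-graph tokenizer built from one of the checkpoints exercised by the tests
tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")

sentences = tf.constant(["This is a straightforward English test sentence."])

# eager call: returns a dict of integer tensors (input_ids, token_type_ids, attention_mask)
eager_outputs = tf_tokenizer(sentences)

# the same tokenizer also runs when compiled into a graph
compiled_tokenizer = tf.function(tf_tokenizer)
graph_outputs = compiled_tokenizer(sentences)

for key in eager_outputs:
    # graph-mode and eager-mode tokenisation must agree
    tf.debugging.assert_equal(eager_outputs[key], graph_outputs[key])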
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert/test_tokenization_bert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BertTokenizer rust_tokenizer_class = BertTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True from_pretrained_filter = filter_non_english def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00E9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11]) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) # With lower casing tokenizer = self.get_tokenizer(do_lower_case=True) rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True) sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_chinese(self): tokenizer = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]) def test_basic_tokenizer_lower(self): 
tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"]) def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_basic_tokenizer_splits_on_punctuation(self): tokenizer = BasicTokenizer() text = "a\n'll !!to?'d of, can't." 
expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(text), expected) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def test_clean_text(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]) self.assertListEqual( [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_2 + [102] def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
tokens = tokenizer_r.encode_plus( sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False expected_results = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"]) def test_change_tokenize_chinese_chars(self): list_of_commun_chinese_char = ["的", "人", "有"] text_with_chinese_char = "".join(list_of_commun_chinese_char) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): kwargs["tokenize_chinese_chars"] = True tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char) self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char) kwargs["tokenize_chinese_chars"] = False tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that only the first Chinese character is not preceded by "##". expected_tokens = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char) ] self.assertListEqual(tokens_without_spe_char_p, expected_tokens) self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
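# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original test module): a minimal,
# self-contained example of how BasicTokenizer and WordpieceTokenizer compose,
# mirroring the assertion in test_full_tokenizer above. The tiny vocabulary
# below is hypothetical and chosen only so the expected pieces are reachable.
if __name__ == "__main__":
    basic = BasicTokenizer(do_lower_case=True)
    toy_vocab = {tok: i for i, tok in enumerate(["[UNK]", "un", "##want", "##ed", ",", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

    # BasicTokenizer lower-cases, strips accents and splits on whitespace/punctuation;
    # WordpieceTokenizer then applies greedy longest-match-first subword splitting per word.
    words = basic.tokenize("UNwant\u00E9d,running")
    pieces = [piece for word in words for piece in wordpiece.tokenize(word)]
    print(pieces)  # expected, as asserted above: ['un', '##want', '##ed', ',', 'runn', '##ing']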
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert/test_modeling_tf_bert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING from transformers.models.bert.modeling_tf_bert import ( TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertModel, ) class TFBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) # Also check the case where encoder outputs are not passed result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, 
output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] 
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForNextSentencePrediction(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
TFBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFBertModel, TFBertForMaskedLM, TFBertLMHeadModel, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFBertModel, "fill-mask": TFBertForMaskedLM, "question-answering": TFBertForQuestionAnswering, "text-classification": TFBertForSequenceClassification, "text-generation": TFBertLMHeadModel, "token-classification": TFBertForTokenClassification, "zero-shot": TFBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = True onnx_min_opset = 10 # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict def setUp(self): self.model_tester = TFBertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): """Test the base model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_causal_lm_base_model(self): """Test the base model of the causal LM model is_deocder=True, no cross_attention, no encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs) def test_model_as_decoder(self): """Test the base model as a decoder (of an encoder-decoder architecture) is_deocder=True + cross_attention + pass encoder outputs """ config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): """Test the causal LM model""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model(*config_and_inputs) def test_causal_lm_model_as_decoder(self): """Test the causal LM model as a decoder""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs) def test_causal_lm_model_past(self): 
"""Test causal LM model with `past_key_values`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): """Test the causal LM model with `past_key_values` and `attention_mask`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): """Test the causal LM model with `past_key_values` and a longer decoder sequence length""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_from_pretrained(self): model = TFBertModel.from_pretrained("jplu/tiny-tf-bert-random") self.assertIsNotNone(model) def test_custom_load_tf_weights(self): model, output_loading_info = TFBertForTokenClassification.from_pretrained( "jplu/tiny-tf-bert-random", output_loading_info=True ) self.assertEqual(sorted(output_loading_info["unexpected_keys"]), []) for layer in output_loading_info["missing_keys"]: self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"]) # TODO (Joao): fix me @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass @require_tf class TFBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFBertForPreTraining.from_pretrained("lysandre/tiny-bert-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 32000] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt2/test_modeling_flax_gpt2.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import GPT2Config, GPT2Tokenizer, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gpt2.modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model if is_torch_available(): import torch class FlaxGPT2ModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config, input_ids, attention_mask = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) 
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxGPT2ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPT2Model, FlaxGPT2LMHeadModel) if is_flax_available() else () all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPT2ModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="</s>", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxGPT2LMHeadModel.from_pretrained("gpt2") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = 
jax.jit(model.generate) output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of words. I'm going to start with the first one.\n", "Hey, I'm not sure if I'm going to be able to do", ] self.assertListEqual(output_string, expected_string) # overwrite from common since `attention_mask` in combination # with `causal_mask` behaves slighly differently @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) # overwrite from common since `attention_mask` in combination # with `causal_mask` behaves slighly differently @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 
1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("gpt2", from_pt=True) outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
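# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original test module): a minimal
# forward pass through a randomly initialised FlaxGPT2Model, using the same tiny
# configuration values as FlaxGPT2ModelTester. Assumes jax/flax are installed.
if __name__ == "__main__" and is_flax_available():
    tiny_config = GPT2Config(
        vocab_size=99, n_embd=32, n_layer=5, n_head=4, n_positions=512, pad_token_id=98
    )
    tiny_model = FlaxGPT2Model(tiny_config)
    dummy_input_ids = np.ones((1, 7), dtype="i4")  # (batch_size=1, seq_length=7)
    outputs = tiny_model(dummy_input_ids)
    print(outputs.last_hidden_state.shape)  # expected: (1, 7, 32)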
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt2/test_tokenization_gpt2.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = GPT2Tokenizer rust_tokenizer_class = GPT2TokenizerFast test_rust_tokenizer = True from_pretrained_kwargs = {"add_prefix_space": True} test_seq2seq = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text, add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) sequence = "lower newer" # Testing tokenization tokens = tokenizer.tokenize(sequence, add_prefix_space=True) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) # Testing conversion to ids without special tokens ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) # Testing 
conversion to ids with special tokens rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True) ids = tokenizer.encode(sequence, add_prefix_space=True) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) # Testing the unknown token input_tokens = tokens + [rust_tokenizer.unk_token] input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_pretokenized_inputs(self, *args, **kwargs): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) def test_padding_if_pad_token_set_slow(self): tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>") # Simple input s = "This is a simple input" s2 = ["This is a simple input looooooooong", "This is a simple input"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] pad_token_id = tokenizer.pad_token_id out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np") out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np") out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np") out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np") # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1], 30) self.assertTrue(pad_token_id in out_s["input_ids"]) self.assertTrue(0 in out_s["attention_mask"]) # s2 # test automatic padding self.assertEqual(out_s2["input_ids"].shape[-1], 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_s2["input_ids"][0]) self.assertFalse(0 in out_s2["attention_mask"][0]) # short slice does have padding self.assertTrue(pad_token_id in out_s2["input_ids"][1]) self.assertTrue(0 in out_s2["attention_mask"][1]) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1], 60) self.assertTrue(pad_token_id in out_p["input_ids"]) self.assertTrue(0 in out_p["attention_mask"]) # p2 # test automatic padding pair self.assertEqual(out_p2["input_ids"].shape[-1], 52) # long 
slice pair doesn't have padding self.assertFalse(pad_token_id in out_p2["input_ids"][0]) self.assertFalse(0 in out_p2["attention_mask"][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_p2["input_ids"][1]) self.assertTrue(0 in out_p2["attention_mask"][1]) def test_add_bos_token_slow(self): bos_token = "$$$" tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True) s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] bos_token_id = tokenizer.bos_token_id out_s = tokenizer(s) out_s2 = tokenizer(s2) self.assertEqual(out_s.input_ids[0], bos_token_id) self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids)) decode_s = tokenizer.decode(out_s.input_ids) decode_s2 = tokenizer.batch_decode(out_s2.input_ids) self.assertEqual(decode_s.split()[0], bos_token) self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2)) # tokenizer has no padding token def test_padding_different_model_input_name(self): pass def test_special_tokens_mask_input_pairs_and_bos_token(self): # TODO: change to self.get_tokenizers() when the fast version is implemented tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)] for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." sequence_1 = "This one too please." encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) @require_tokenizers class OPTTokenizationTest(unittest.TestCase): def test_serialize_deserialize_fast_opt(self): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained("test_opt") tokenizer = AutoTokenizer.from_pretrained("./test_opt") tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) def test_fast_slow_equivalence(self): tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True) text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) # Same as above self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) @unittest.skip("This test is failing because of a bug in the fast tokenizer") def test_users_can_modify_bos(self): tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) tokenizer.bos_token = "bos" tokenizer.bos_token_id = tokenizer.get_vocab()["bos"] text = "A photo of a cat" tokens_ids = tokenizer.encode( text, ) # We changed the bos token self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758]) 
tokenizer.save_pretrained("./tok") tokenizer = AutoTokenizer.from_pretrained("./tok") self.assertTrue(tokenizer.is_fast) tokens_ids = tokenizer.encode( text, ) self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt2/test_modeling_tf_gpt2.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import GPT2Config, is_tf_available from transformers.testing_utils import require_tf, require_tf2onnx, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPT2Tokenizer from transformers.models.gpt2.modeling_tf_gpt2 import ( TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, TFGPT2Model, ) from transformers.tf_utils import shape_list class TFGPT2ModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.bos_token_id = self.vocab_size - 1 self.eos_token_id = self.vocab_size - 1 self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, ) head_mask = 
ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2Model(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs = [input_ids, None, input_mask] # None is the input for 'past' result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2Model(config=config) # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_gpt2_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPT2Model(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = 
ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat([attn_mask, tf.ones((shape_list(attn_mask)[0], 1), dtype=tf.int32)], axis=1) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-12) def create_and_check_gpt2_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = TFGPT2Model(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] token_type_ids = token_type_ids[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) next_token_types = ids_tensor((self.batch_size, 3), self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) next_token_type_ids = tf.concat([token_type_ids, next_token_types], axis=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past_key_values, )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), shape_list(output_from_past)[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_gpt2_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFGPT2LMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_gpt2_double_head( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = TFGPT2DoubleHeadsModel(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) 
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_gpt2_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFGPT2ForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class TFGPT2ModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFGPT2Model, TFGPT2LMHeadModel, TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel) if is_tf_available() else () ) all_generative_model_classes = (TFGPT2LMHeadModel,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFGPT2Model, "text-classification": TFGPT2ForSequenceClassification, "text-generation": TFGPT2LMHeadModel, "zero-shot": TFGPT2ForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = True onnx_min_opset = 10 def setUp(self): self.model_tester = TFGPT2ModelTester(self) self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_gpt2_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model(*config_and_inputs) def test_gpt2_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs) def test_gpt2_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs) def test_gpt2_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs) def test_gpt2_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_lm_head(*config_and_inputs) def test_gpt2_double_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_double_head(*config_and_inputs) def test_gpt2_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs) @slow 
def test_model_from_pretrained(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFGPT2Model.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since ONNX runtime optimization doesn't work with tf.gather() when the argument # `batch_dims` > 0" @require_tf2onnx @slow def test_onnx_runtime_optimize(self): if not self.test_onnx: return import onnxruntime import tf2onnx config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Skip these 2 classes which uses `tf.gather` with `batch_dims=1` if model_class in [TFGPT2ForSequenceClassification, TFGPT2DoubleHeadsModel]: continue model = model_class(config) model.build() onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset) onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) # TODO (Joao): fix me @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass @require_tf class TFGPT2ModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_greedy_distilgpt2_batch_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "repetition_penalty": 1.3, } output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = [ "Today is a beautiful day and I am so happy to be able take part in this amazing event.", "Yesterday was a very interesting time for the world to see how much of this is", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_sample_distilgpt2_batch_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "do_sample": True, "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "repetition_penalty": 1.3, "temperature": 1.5, "top_k": 500, "top_p": 0.9, "seed": [42, 0], # seed set -> deterministic sampling sequence -> deterministic generation } # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = [ "Today is a beautiful day and we will make you feel very hot/terrific in all your", "Yesterday was known by national television networks as Le Big Show or Wild Dog Jeopard", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_greedy_distilgpt2_beam_search_special(self): model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["Today is a beautiful day and", "Yesterday 
was"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "num_beams": 2, } output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = [ "Today is a beautiful day and a great day for all of us.\n\nI’m", "Yesterday was the first time that a person has been arrested in the United States for", ] self.assertListEqual(output_strings, expected_output_string) @slow def test_lm_generate_distilgpt2_left_padding(self): """Tests that the generated text is the same, regarless of left padding""" model = TFGPT2LMHeadModel.from_pretrained("distilgpt2") tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" generation_kwargs = { "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], "no_repeat_ngram_size": 2, "do_sample": False, "repetition_penalty": 1.3, } expected_output_string = ( "Today is a beautiful day and I am so happy to be able take part in this amazing event." ) sentences = ["Today is a beautiful day and"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) # using default length output_ids = model.generate(**input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(output_strings[0], expected_output_string) sentences = ["Today is a beautiful day and", "This is a very long input that we absolutely don't care about"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) # longer max length to capture the full length (remember: it is left padded) output_ids = model.generate(**input_ids, **generation_kwargs, max_length=27) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(output_strings[0], expected_output_string) @slow def test_lm_generate_gpt2_greedy_xla(self): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["The dog", "The flying machine"] expected_output_strings = [ "The dog was found in a field near the intersection of West and West Streets.\n\nThe", "The flying machine is a small, lightweight, and lightweight aircraft that can be used for any type of", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=False) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=False) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) @slow def test_lm_generate_gpt2_sample_xla(self): # NOTE: due to the small numerical differences that are natural when we compile to XLA, sampling the same # output out of the same seed is far from guaranteed. We can, however, confirm that the results are sensible # and that we can seed both versions. 
# forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentence = ["The dog", "The flying machine"] expected_output_string = [ "The dog owner asked why did our vet decide there needed to be extra ventilation inside because most" " puppies", "The flying machine was made by an artist who found it difficult to control it as it did not use", ] expected_output_string_xla = [ "The dog has been named in connection with the murder of a 20-year-old man in", "The flying machine is a new and improved system to operate and operate a new system and system " "system system", ] input_ids = tokenizer(sentence, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=True, seed=[7, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_string) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=True, seed=[7, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_string_xla) @slow def test_lm_generate_gpt2_beam_search_xla(self): model = TFGPT2LMHeadModel.from_pretrained("gpt2") tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" sentences = ["The dog", "The flying machine"] expected_output_strings = [ "The dog was found in the backyard of a home in the 6500 block of South Main Street", "The flying machine is a very powerful machine, but it's not a very powerful machine. It's", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True) output_ids = model.generate(**input_ids, do_sample=False, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(**input_ids, do_sample=False, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(output_strings, expected_output_strings) @slow def test_contrastive_search_gpt2(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = TFGPT2LMHeadModel.from_pretrained("gpt2-large") input_ids = gpt2_tokenizer(article, return_tensors="tf") outputs = gpt2_model.generate(**input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. 
Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], ) @slow def test_contrastive_search_gpt2_xla(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = TFGPT2LMHeadModel.from_pretrained("gpt2-large") input_ids = gpt2_tokenizer(article, return_tensors="tf") xla_generate = tf.function(gpt2_model.generate, jit_compile=True) outputs = xla_generate(**input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], )
0
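As context for the XLA generation tests in the file above: the pattern they exercise is simply wrapping `model.generate` in `tf.function(..., jit_compile=True)` and comparing the result against eager generation. A minimal standalone sketch of that pattern (a sketch only, assuming the `gpt2` checkpoint can be downloaded; the prompt text is arbitrary):

```python
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"  # left padding, so generated tokens follow the real content
model = TFGPT2LMHeadModel.from_pretrained("gpt2")

inputs = tokenizer(["The dog"], return_tensors="tf", padding=True)

# Eager greedy generation
eager_ids = model.generate(**inputs, do_sample=False)

# XLA-compiled generation: the same call, wrapped in tf.function(jit_compile=True)
xla_generate = tf.function(model.generate, jit_compile=True)
xla_ids = xla_generate(**inputs, do_sample=False)

print(tokenizer.batch_decode(eager_ids, skip_special_tokens=True))
print(tokenizer.batch_decode(xla_ids, skip_special_tokens=True))
```

Greedy search is used here because, as the sampling test above notes, XLA compilation can introduce small numerical differences, so sampled outputs are not guaranteed to match the eager ones even with a fixed seed.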
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt2/test_modeling_gpt2.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import gc import math import unittest from transformers import GPT2Config, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model, GPT2Tokenizer, ) class GPT2ModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return GPT2Config.from_pretrained("gpt2") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: 
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return GPT2Config( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2Model(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, 
next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt2_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPT2Model(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt2_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPT2Model(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == 
next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2LMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPT2LMHeadModel(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_double_lm_head_model( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = GPT2DoubleHeadsModel(config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, "labels": multiple_choice_inputs_ids, } result = model(**inputs) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_gpt2_for_question_answering( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_gpt2_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt2_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPT2ForTokenClassification(config) 
model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_gpt2_weight_initialization(self, config, *args): model = GPT2Model(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class GPT2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPT2Model, "question-answering": GPT2ForQuestionAnswering, "text-classification": GPT2ForSequenceClassification, "text-generation": GPT2LMHeadModel, "token-classification": GPT2ForTokenClassification, "zero-shot": GPT2ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else () fx_compatible = True test_missing_keys = False test_model_parallel = True # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "GPT2DoubleHeadsModel": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["input_ids"] = inputs_dict["labels"] inputs_dict["token_type_ids"] = inputs_dict["labels"] inputs_dict["mc_token_ids"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, ) inputs_dict["mc_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = GPT2ModelTester(self) self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def test_config(self): self.config_tester.run_common_tests() def test_gpt2_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model(*config_and_inputs) def test_gpt2_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs) def test_gpt2_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs) def test_gpt2_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs) def test_gpt2_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gpt2_double_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) def test_gpt2_question_answering_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_question_answering(*config_and_inputs) def test_gpt2_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs) def test_gpt2_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_for_token_classification(*config_and_inputs) def test_gpt2_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_gpt2_scale_attn_by_inverse_layer_idx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(scale_attn_by_inverse_layer_idx=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt2_reorder_and_upcast_attn(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(reorder_and_upcast_attn=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt2_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_weight_initialization(*config_and_inputs) @slow def test_batch_generation(self): model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.padding_side = "left" # Define PAD Token = EOS Token = 50256 tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, 
skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a mess. I'm not sure if he's going", "Today, I'm going to be doing a lot of research on this. I", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_batch_generation_2heads(self): model = GPT2DoubleHeadsModel.from_pretrained("gpt2") model.to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") tokenizer.padding_side = "left" # This tokenizer has no pad token, so we have to set it in some way # Define PAD Token = EOS Token = 50256 tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = model.config.eos_token_id # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) token_type_ids = torch.cat( [ input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0), input_ids.new_full((input_ids.shape[0], 1), 500), ], dim=-1, ) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) outputs_tt = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), token_type_ids=token_type_ids, ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a mess. I'm not sure if he's going", "Today, I'm going to be doing a lot of research on this. 
I", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_model_from_pretrained(self): for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = GPT2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class GPT2ModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def _test_lm_generate_gpt2_helper( self, gradient_checkpointing=False, reorder_and_upcast_attn=False, scale_attn_by_inverse_layer_idx=False, verify_outputs=True, ): model = GPT2LMHeadModel.from_pretrained( "gpt2", reorder_and_upcast_attn=reorder_and_upcast_attn, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, ) if gradient_checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) # The dog input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog # fmt: off expected_output_ids = [ 464, 3290, 373, 1043, 287, 257, 2214, 1474, 262, 16246, 286, 2688, 290, 2688, 27262, 13, 198, 198, 464, 3290, ] # fmt: on output_ids = model.generate(input_ids, do_sample=False) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @slow def test_lm_generate_gpt2(self): self._test_lm_generate_gpt2_helper() @slow def test_lm_generate_gpt2_with_gradient_checkpointing(self): self._test_lm_generate_gpt2_helper(gradient_checkpointing=True) @slow def test_lm_generate_gpt2_with_reorder_and_upcast_attn(self): self._test_lm_generate_gpt2_helper(reorder_and_upcast_attn=True) @slow def test_lm_generate_gpt2_with_scale_attn_by_inverse_layer_idx(self): self._test_lm_generate_gpt2_helper(scale_attn_by_inverse_layer_idx=True, verify_outputs=False) @slow def test_gpt2_sample(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) output_ids = model.generate(input_ids, do_sample=True) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) token_type_ids = tokenized.token_type_ids.to(torch_device) output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5) output_seq_tt = model.generate( input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5 ) output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True) output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True) EXPECTED_OUTPUT_STR = ( "Today is a nice day and if you don't know anything about the state of play during your holiday" ) self.assertEqual(output_str, EXPECTED_OUTPUT_STR) self.assertTrue( all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs))) ) # token_type_ids should change output @slow def test_gpt2_sample_max_time(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2LMHeadModel.from_pretrained("gpt2") model.to(torch_device) torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", 
return_tensors="pt", return_token_type_ids=True) input_ids = tokenized.input_ids.to(torch_device) MAX_TIME = 0.5 start = datetime.datetime.now() model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME)) self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) start = datetime.datetime.now() model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) @slow def test_contrastive_search_gpt2(self): article = ( "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based" ) gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large") gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2-large").to(torch_device) input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = gpt2_model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_length=256) generated_text = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "DeepMind Technologies is a British artificial intelligence subsidiary of Alphabet Inc. and research " "laboratory founded in 2010. DeepMind was acquired by Google in 2014. The company is based in London, " "United Kingdom\n\nGoogle has a lot of data on its users and uses it to improve its products, such as " "Google Now, which helps users find the information they're looking for on the web. But the company " "is not the only one to collect data on its users. Facebook, for example, has its own facial " "recognition technology, as well as a database of millions of photos that it uses to personalize its " "News Feed.\n\nFacebook's use of data is a hot topic in the tech industry, with privacy advocates " "concerned about the company's ability to keep users' information private. In a blog post last " 'year, Facebook CEO Mark Zuckerberg said his company would "do our best to be transparent about our ' 'data use and how we use it."\n\n"We have made it clear that we do not sell or share your data with ' 'third parties," Zuckerberg wrote. "If you have questions or concerns, please reach out to us at ' '[email protected]."\n\nGoogle declined to comment on the privacy implications of its use of data, ' "but said in a statement to The Associated Press that" ], )
0
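The `create_and_check_gpt2_model_past*` checks in the PyTorch tester above all follow the same recipe: run a forward pass with `use_cache=True`, feed only the new token together with the returned `past_key_values`, and require that the result matches a full forward pass over the concatenated sequence. A minimal sketch of that recipe outside the test harness (a tiny random config, so the numbers themselves are meaningless):

```python
import torch
from transformers import GPT2Config, GPT2Model

config = GPT2Config(vocab_size=99, n_embd=32, n_layer=2, n_head=4, n_positions=64)
model = GPT2Model(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_token = torch.randint(0, config.vocab_size, (1, 1))

with torch.no_grad():
    # first pass returns a cache of per-layer key/value tensors
    out = model(input_ids, use_cache=True)
    cached = model(next_token, past_key_values=out.past_key_values).last_hidden_state

    # reference: recompute everything from scratch on the concatenated sequence
    full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state

# hidden state of the new token must agree between the cached and uncached paths
torch.testing.assert_close(cached[:, 0], full[:, -1], rtol=1e-3, atol=1e-3)
```

The tests above compare only a random slice of the hidden states with `atol=1e-3`, since small numerical differences between the cached and uncached paths are expected.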
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt2/test_tokenization_gpt2_tf.py
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
    # so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
0
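Note: the `ModelToSave.serving` signature in the file above hinges on converting the in-graph tokenizer's ragged output into a dense batch and deriving the attention mask from the non-zero token ids. A minimal standalone sketch of that conversion, assuming a `TFGPT2Tokenizer` loaded from the `gpt2` checkpoint with keras-nlp available:

```python
import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

# In-graph tokenizer: returns RaggedTensors because sentences have different lengths.
tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")

texts = tf.constant(["a short sentence", "a slightly longer test sentence"])
tokenized = tokenizer(texts)

# Pad the ragged input_ids into a rectangular batch; padded positions become 0.
input_ids = tokenized["input_ids"].to_tensor()

# Mark real tokens (id > 0) vs. padding, mirroring the mask built in ModelToSave.serving.
attention_mask = tf.cast(input_ids > 0, tf.int32)

print(input_ids.shape, attention_mask.shape)  # both (2, longest_sequence_in_batch)
```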
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/detr/test_image_processing_detr.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class DetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to DetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DetrImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_pad")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = 
prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} # encode them image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = 
torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
0
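Note: the `get_expected_values` helper in the file above scales the shorter image side to `size["shortest_edge"]` and the longer side by the same ratio. A standalone sketch of that arithmetic (plain Python, no transformers dependency; the 18-pixel target mirrors the tester's default):

```python
def expected_resized_size(height, width, shortest_edge=18):
    """Scale the shorter image side to `shortest_edge`, keeping the aspect ratio."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # square image


# A 30 x 60 (h x w) image: height is the shorter side, so height -> 18 and width -> 36.
print(expected_resized_size(30, 60))  # (18, 36)
```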
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/detr/test_modeling_detr.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch DETR model. """ import inspect import math import unittest from transformers import DetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DetrForObjectDetection, DetrForSegmentation, DetrModel if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class DetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], 
out_indices=[2, 3, 4], ) return DetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_detr_model(self, config, pixel_values, pixel_mask, labels): model = DetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DetrModel, DetrForObjectDetection, DetrForSegmentation, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": DetrModel, "image-segmentation": DetrForSegmentation, "object-detection": DetrForObjectDetection, } if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["DetrForObjectDetection", "DetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_detr_model(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_model(*config_and_inputs) def test_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_object_detection_head_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): # TODO Niels: fix me! pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DetrForObjectDetection": correct_outlen += 2 # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks if model_class.__name__ == "DetrForSegmentation": correct_outlen += 3 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) 
self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels + 1, ) self.assertEqual(outputs.logits.shape, expected_shape) 
self.assertTrue(outputs) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # use greyscale pixel values inputs_dict["pixel_values"] = floats_tensor( [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] ) # let's set num_channels to 1 config.num_channels = 1 config.backbone_config.num_channels = 1 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DetrModelIntegrationTestsTimmBackbone(unittest.TestCase): @cached_property def default_image_processor(self): return DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") if is_vision_available() else None def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]] ).to(torch_device) 
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9982, 0.9960, 0.9955, 0.9988, 0.9987]).to(torch_device) expected_labels = [75, 75, 63, 17, 17] expected_slice_boxes = torch.tensor([40.1633, 70.8115, 175.5471, 117.9841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_panoptic_segmentation_head(self): model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267)) self.assertEqual(outputs.pred_masks.shape, expected_shape_masks) expected_slice_masks = torch.tensor( [[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3)) # verify postprocessing results = image_processor.post_process_panoptic_segmentation( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_shape = torch.Size([480, 640]) expected_slice_segmentation = torch.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype=torch.int32).to( torch_device ) expected_number_of_segments = 5 expected_first_segment = {"id": 1, "label_id": 17, "was_fused": False, "score": 0.994096} number_of_unique_segments = len(torch.unique(results["segmentation"])) self.assertTrue( number_of_unique_segments, expected_number_of_segments + 1 ) # we add 1 for the background class self.assertTrue(results["segmentation"].shape, expected_shape) self.assertTrue(torch.allclose(results["segmentation"][:3, :3], expected_slice_segmentation, atol=1e-4)) self.assertTrue(len(results["segments_info"]), expected_number_of_segments) self.assertDictEqual(results["segments_info"][0], expected_first_segment) @require_vision @require_torch @slow class DetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return ( DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm") if 
is_vision_available() else None ) def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50", revision="no_timm").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slice = torch.tensor( [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
0
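Note: `DetrModelTester` in the file above derives its expected sequence lengths from the backbone's overall stride of 32: the encoder consumes the flattened feature map, while the decoder always consumes `num_queries` object queries. A quick check of that arithmetic for the tester's 200 x 200 inputs, assuming the stride-32 downsampling used there:

```python
import math

min_size, max_size, num_queries = 200, 200, 12

# The CNN backbone reduces each spatial dimension by a factor of 32 before the
# feature map is flattened into the encoder's token sequence.
feature_h = math.ceil(min_size / 32)   # 7
feature_w = math.ceil(max_size / 32)   # 7
encoder_seq_length = feature_h * feature_w
decoder_seq_length = num_queries

print(encoder_seq_length, decoder_seq_length)  # 49 12
```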
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilebert/test_modeling_tf_mobilebert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # special case for ForPreTraining model, same as BERT tests def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict class TFMobileBertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = 
intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.embedding_size = embedding_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = MobileBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMobileBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMobileBertForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMobileBertForNextSentencePrediction(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMobileBertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual( result.prediction_logits.shape, 
(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFMobileBertForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFMobileBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_mobilebert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFMobileBertForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFMobileBertForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def setUp(self): self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mobilebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: model = TFMobileBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFMobileBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 30522] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
0
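Note: the multiple-choice test in the file above expands each `(batch, seq)` tensor into `(batch, num_choices, seq)` by inserting a choice axis and tiling. A small shape-only sketch of that transformation, using toy dimensions rather than the tester's defaults:

```python
import tensorflow as tf

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

# Insert a choice dimension, then repeat the same sequence once per answer choice.
multiple_choice_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))

print(multiple_choice_ids.shape)  # (2, 4, 5)
```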
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilebert/test_modeling_mobilebert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class MobileBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.embedding_size = embedding_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels 
def get_config(self): return MobileBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MobileBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mobilebert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MobileBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MobileBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = MobileBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mobilebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs) def _long_tensor(tok_lst): return torch.tensor( tok_lst, dtype=torch.long, device=torch_device, ) TOLERANCE = 1e-3 @require_torch @require_sentencepiece @require_tokenizers class MobileBertModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device) input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 9, 512)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ], device=torch_device, ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE) upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE) self.assertTrue(lower_bound and upper_bound)
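# --- Illustrative sketch, not part of the original test file ---
# The integration test above compares expected/actual *ratios* rather than an
# absolute difference because MobileBERT activations span many orders of
# magnitude (roughly 1e0 up to ~1e9): an absolute tolerance tight enough for
# the small values would always fail on the large ones. A minimal,
# self-contained demonstration of the same bound check (the tensors and the
# helper name below are made up for illustration):
import torch


def within_relative_bounds(expected: torch.Tensor, actual: torch.Tensor, tolerance: float = 1e-3) -> bool:
    """True if every expected/actual ratio lies inside [1 - tolerance, 1 + tolerance]."""
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))


# A 0.01% deviation on a value of 1e8 is an absolute difference of 1e4, which an
# absolute-tolerance check would reject, yet it is exactly the kind of deviation
# the relative check above is meant to accept.
expected = torch.tensor([1.0e8, 2.5, -3.0])
actual = expected * 1.0001
assert within_relative_bounds(expected, actual)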
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilebert/test_tokenization_mobilebert.py
# coding=utf-8 # Copyright 2022 Leon Derczynski. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the MobileBERT tokenizer. """ import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english # Copied from transformers.tests.models.bert.test_modeling_bert.py with Bert->MobileBert and pathfix @require_tokenizers class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = MobileBertTokenizer rust_tokenizer_class = MobileBertTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True from_pretrained_filter = filter_non_english pre_trained_model_path = "google/mobilebert-uncased" def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) self.tokenizers_list = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00E9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11]) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) # With lower casing tokenizer = self.get_tokenizer(do_lower_case=True) rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True) sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, 
add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_chinese(self): tokenizer = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]) def test_basic_tokenizer_lower(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"]) def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def test_clean_text(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]) self.assertListEqual( [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_2 + [102] def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
tokens = tokenizer_r.encode_plus( sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False expected_results = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"]) def test_change_tokenize_chinese_chars(self): list_of_commun_chinese_char = ["的", "人", "有"] text_with_chinese_char = "".join(list_of_commun_chinese_char) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): kwargs["tokenize_chinese_chars"] = True tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char) self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char) kwargs["tokenize_chinese_chars"] = False tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that only the first Chinese character is not preceded by "##". expected_tokens = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char) ] self.assertListEqual(tokens_without_spe_char_p, expected_tokens) self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
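# --- Illustrative sketch, not part of the original test file ---
# test_change_tokenize_chinese_chars above hinges on the `tokenize_chinese_chars`
# flag: when it is True (the default) every CJK character is surrounded by
# whitespace before word-piece splitting, so each character becomes a word of
# its own and never carries the "##" continuation prefix; when it is False a
# run of CJK characters stays one word, so only its first piece escapes "##".
# A minimal demonstration with the BasicTokenizer already imported in this
# file (the outputs in the comments are what the flag is expected to yield):
from transformers.models.bert.tokenization_bert import BasicTokenizer

split_cjk = BasicTokenizer(tokenize_chinese_chars=True)
keep_cjk = BasicTokenizer(tokenize_chinese_chars=False)

text = "ah\u535A\u63A8zz"  # "ah博推zz", mixing Latin and CJK characters
print(split_cjk.tokenize(text))  # expected: ["ah", "博", "推", "zz"]
print(keep_cjk.tokenize(text))   # expected: ["ah博推zz"] - the CJK run stays one word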
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch LayoutLMv2 model. """ import unittest from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device from transformers.utils import is_detectron2_available, is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LayoutLMv2Config, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2Model, ) from transformers.models.layoutlmv2.modeling_layoutlmv2 import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_detectron2_available(): from detectron2.structures.image_list import ImageList class LayoutLMv2ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, image_feature_pool_shape=[7, 7, 256], coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 
0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t image = ImageList( torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device), self.image_size, ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = LayoutLMv2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, image_feature_pool_shape=self.image_feature_pool_shape, coordinate_size=self.coordinate_size, shape_size=self.shape_size, ) # use smaller resnet backbone to make tests faster config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18 config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64 config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1 return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, image=image) # LayoutLMv2 has a different expected sequence length, namely also visual tokens are added expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1] self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv2ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, 
image, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv2ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "image": image, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch @require_detectron2 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = True test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv2Model, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model} if is_torch_available() else {} ) def setUp(self): self.model_tester = LayoutLMv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @require_torch_multi_gpu @unittest.skip( reason=( "LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well" " with `nn.DataParallel`" ) ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # LayoutLMv2 has a different expected sequence length expected_seq_len = ( self.model_tester.seq_length + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1] ) self.assertListEqual( list(hidden_states[0].shape[-2:]), [expected_seq_len, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip("We cannot configure detectron2 to output a smaller backbone") def test_model_is_small(self): pass @slow def test_model_from_pretrained(self): for model_name in LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LayoutLMv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "backbone" in name or "visual_segment_embedding" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def prepare_layoutlmv2_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on: # fmt: off input_ids = 
torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231 bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231 image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231 attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231 token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231 # fmt: on return input_ids, bbox, image, attention_mask, token_type_ids @require_torch @require_detectron2 class LayoutLMv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device) ( input_ids, bbox, image, attention_mask, token_type_ids, ) = prepare_layoutlmv2_batch_inputs() # forward pass outputs = model( input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), image=image.to(torch_device), attention_mask=attention_mask.to(torch_device), token_type_ids=token_type_ids.to(torch_device), ) # verify the sequence output expected_shape = torch.Size( ( 2, input_ids.shape[1] + model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1], model.config.hidden_size, ) ) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3)) # verify the pooled output expected_shape = torch.Size((2, model.config.hidden_size)) self.assertEqual(outputs.pooler_output.shape, expected_shape)
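# --- Illustrative sketch, not part of the original test file ---
# Several assertions above rely on LayoutLMv2 appending visual tokens to the
# text tokens: the backbone feature map is pooled down to
# `image_feature_pool_shape` (7 x 7 for the base configuration), and those 49
# pooled positions are concatenated after the text sequence. A small helper
# making that arithmetic explicit (the helper name and example values are only
# for illustration):
def layoutlmv2_output_length(text_seq_length: int, image_feature_pool_shape=(7, 7, 256)) -> int:
    """Number of hidden-state positions: text tokens plus flattened pooled visual tokens."""
    return text_seq_length + image_feature_pool_shape[0] * image_feature_pool_shape[1]


# The integration test feeds 25 text tokens per example, so the expected
# hidden-state shape is (batch_size, 25 + 49, hidden_size), i.e. 74 positions
# per example.
assert layoutlmv2_output_length(25) == 74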
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import re import shutil import tempfile import unittest from typing import List from transformers import ( AddedToken, LayoutLMv2TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available, logging, ) from transformers.models.layoutlmv2.tokenization_layoutlmv2 import ( VOCAB_FILES_NAMES, BasicTokenizer, LayoutLMv2Tokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import ( is_pt_tf_cross_test, require_detectron2, require_pandas, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import ( SMALL_TRAINING_CORPUS, TokenizerTesterMixin, filter_non_english, merge_model_tokenizer_mappings, ) logger = logging.get_logger(__name__) @require_tokenizers @require_pandas class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LayoutLMv2Tokenizer rust_tokenizer_class = LayoutLMv2TokenizerFast test_rust_tokenizer = True space_between_special_tokens = True from_pretrained_filter = filter_non_english test_seq2seq = False def get_words_and_boxes(self): words = ["a", "weirdly", "test"] boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]] return words, boxes def get_words_and_boxes_batch(self): words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]] boxes = [ [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]], [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]], ] return words, boxes def get_question_words_and_boxes(self): question = "what's his name?" words = ["a", "weirdly", "test"] boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]] return question, words, boxes def get_question_words_and_boxes_batch(self): questions = ["what's his name?", "how is he called?"] words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]] boxes = [ [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]], [[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]], ] return questions, words, boxes def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "what", "s", "his", "name", "?", "a", "weird", "##ly", "test", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00E9d,running" output_text = "unwanted, running" return input_text, output_text def test_chinese(self): tokenizer = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]) def test_basic_tokenizer_lower(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
"), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"]) def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def test_clean_text(self): tokenizer = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(t) for t in ["Hello", "\xad", "hello"]], [["[UNK]"], [], ["[UNK]"]]) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutlmv2-base-uncased") question, words, boxes = self.get_question_words_and_boxes() text = tokenizer.encode( question.split(), boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))], add_special_tokens=False, ) text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_pair == [101] + text + [102] + text_2 + [102] def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() words[1] = tokenizer_r.mask_token tokens = tokenizer_r.encode_plus( words, boxes=boxes, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) expected_results = [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((0, 6), tokenizer_r.mask_token), ((0, 4), "test"), ((0, 0), tokenizer_r.sep_token), ] self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"]) def test_add_special_tokens(self): tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): special_token = "[SPECIAL_TOKEN]" special_token_box = [1000, 1000, 1000, 1000] tokenizer.add_special_tokens({"cls_token": special_token}) encoded_special_token = tokenizer.encode( [special_token], boxes=[special_token_box], add_special_tokens=False ) self.assertEqual(len(encoded_special_token), 1) decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True) 
self.assertTrue(special_token not in decoded) def test_add_tokens_tokenizer(self): tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) words = "aaaaa bbbbbb low cccccccccdddddddd l".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] tokens = tokenizer.encode( words, boxes=boxes, add_special_tokens=False, ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokens[-3]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-2], tokenizer.pad_token_id) @require_tokenizers def test_encode_decode_with_spaces(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)] tokenizer.add_tokens(new_toks) input = "[ABC][DEF][ABC][DEF]" if self.space_between_special_tokens: output = "[ABC] [DEF] [ABC] [DEF]" else: output = input encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(decoded, [output, output.lower()]) @unittest.skip("Not implemented") def test_right_and_left_truncation(self): pass def test_encode_plus_with_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) padding_size = 10 padding_idx = tokenizer.pad_token_id encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True) input_ids = encoded_sequence["input_ids"] special_tokens_mask = 
encoded_sequence["special_tokens_mask"] sequence_length = len(input_ids) # Test 'longest' and 'no_padding' don't do anything tokenizer.padding_side = "right" not_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertTrue(sequence_length == not_padded_sequence_length) self.assertTrue(input_ids == not_padded_input_ids) self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) not_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertTrue(sequence_length == not_padded_sequence_length) self.assertTrue(input_ids == not_padded_input_ids) self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) # Test right padding tokenizer.padding_side = "right" right_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) right_padded_input_ids = right_padded_sequence["input_ids"] right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"] right_padded_sequence_length = len(right_padded_input_ids) self.assertTrue(sequence_length + padding_size == right_padded_sequence_length) self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids) self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask) # Test left padding tokenizer.padding_side = "left" left_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) left_padded_input_ids = left_padded_sequence["input_ids"] left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"] left_padded_sequence_length = len(left_padded_input_ids) self.assertTrue(sequence_length + padding_size == left_padded_sequence_length) self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids) self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask) if "token_type_ids" in tokenizer.model_input_names: token_type_ids = encoded_sequence["token_type_ids"] left_padded_token_type_ids = left_padded_sequence["token_type_ids"] right_padded_token_type_ids = right_padded_sequence["token_type_ids"] assert token_type_ids + [0] * padding_size == right_padded_token_type_ids assert [0] * padding_size + token_type_ids == left_padded_token_type_ids if "attention_mask" in tokenizer.model_input_names: attention_mask = encoded_sequence["attention_mask"] right_padded_attention_mask = right_padded_sequence["attention_mask"] left_padded_attention_mask = left_padded_sequence["attention_mask"] self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask) self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask) def test_internal_consistency(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() tokens = [] for word in words: tokens.extend(tokenizer.tokenize(word)) ids = 
tokenizer.convert_tokens_to_ids(tokens) ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) self.assertNotEqual(len(tokens_2), 0) text_2 = tokenizer.decode(ids) self.assertIsInstance(text_2, str) output_text = "a weirdly test" self.assertEqual(text_2, output_text) def test_mask_output(self): tokenizers = self.get_tokenizers(fast=False, do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() if ( tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer" and "token_type_ids" in tokenizer.model_input_names ): information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True) sequences, mask = information["input_ids"], information["token_type_ids"] self.assertEqual(len(sequences), len(mask)) def test_number_of_added_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # test 1: single sequence words, boxes = self.get_words_and_boxes() sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True) # Method is implemented (e.g. not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences) ) # test 2: two sequences question, words, boxes = self.get_question_words_and_boxes() sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False) attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True) # Method is implemented (e.g. 
not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences) ) def test_padding_to_max_length(self): """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated""" tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) padding_idx = tokenizer.pad_token_id # Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) # FIXME: the next line should be padding(max_length) to avoid warning padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert encoded_sequence + [padding_idx] * padding_size == padded_sequence # Check that nothing is done when a maximum length is not specified encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right def test_padding(self, max_length=50): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id # Encode - Simple input words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True) input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode(words, boxes=boxes, padding=True) self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode - Pair input question, words, boxes = self.get_question_words_and_boxes() input_r = tokenizer_r.encode( question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode( question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, 
padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True) input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest") self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode_plus - Simple input words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True) input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Encode_plus - Pair input question, words, boxes = self.get_question_words_and_boxes() input_r = tokenizer_r.encode_plus( question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus( question, words, boxes=boxes, max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( question, words, boxes=boxes, max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Batch_encode_plus - Simple input words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, max_length=max_length, pad_to_max_length=True, ) input_p = tokenizer_p.batch_encode_plus( words, boxes=boxes, max_length=max_length, pad_to_max_length=True, ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="longest", ) input_p = 
tokenizer_p.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding=True, ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest") input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Batch_encode_plus - Pair input questions, words, boxes = self.get_question_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, padding=True, ) input_p = tokenizer_p.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, padding="longest", ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad on single examples after tokenization words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode_plus(words, boxes=boxes) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.encode_plus(words, boxes=boxes) input_p = tokenizer_r.pad(input_p) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus(words, boxes=boxes) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.encode_plus(words, boxes=boxes) input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) # Using pad after tokenization words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_p = tokenizer_r.pad(input_p) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad after tokenization words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) def test_padding_warning_message_fast_tokenizer(self): if not self.test_rust_tokenizer: return words, boxes = self.get_words_and_boxes_batch() tokenizer_fast = self.get_rust_tokenizer() encoding_fast = tokenizer_fast( words, boxes=boxes, ) with self.assertLogs("transformers", level="WARNING") as cm: tokenizer_fast.pad(encoding_fast) self.assertEqual(len(cm.records), 1) self.assertIn( "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" " encode the text followed by a call to the `pad` method to get a padded encoding.", cm.records[0].message, ) if not self.test_slow_tokenizer: return 
tokenizer_slow = self.get_tokenizer() encoding_slow = tokenizer_slow( words, boxes=boxes, ) with self.assertLogs(level="WARNING") as cm: # We want to assert there are no warnings, but the 'assertLogs' method does not support that. # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. logger.warning("Dummy warning") tokenizer_slow.pad(encoding_slow) self.assertEqual(len(cm.records), 1) self.assertIn( "Dummy warning", cm.records[0].message, ) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Test not batched words, boxes = self.get_words_and_boxes() encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test not batched pairs question, words, boxes = self.get_question_words_and_boxes() encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched words, boxes = self.get_words_and_boxes_batch() encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) def test_batch_encode_plus_batch_sequence_length(self): # Tests that all encoded values have the correct size tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() encoded_sequences = [ tokenizer.encode_plus(words_example, boxes=boxes_example) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) maximum_length = len( max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) ) # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences_padded = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch_padded = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) self.assertListEqual( encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), ) # check 'longest' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest" ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) # check 'no_padding' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=False ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, 
max_length=maximum_length + 10, padding=False ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) @unittest.skip("batch_encode_plus does not handle overflowing tokens.") def test_batch_encode_plus_overflowing_tokens(self): pass def test_batch_encode_plus_padding(self): # Test that padded sequences are equivalent between batch_encode_plus and encode_plus # Right padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) # Left padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.padding_side = "left" words, boxes = self.get_words_and_boxes_batch() max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") else: words, boxes = self.get_words_and_boxes() # empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8) # for key, value in empty_tokens.items(): # self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, words, boxes=boxes, padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_tokenizer_slow_store_full_signature(self): signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = 
self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_build_inputs_with_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Input tokens id words, boxes = self.get_words_and_boxes() input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False) input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False) # Generate output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) self.assertEqual(output_p, output_r) # Generate pair output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_special_tokens_mask_input_pairs(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True, # add_prefix_space=False, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) def test_special_tokens_mask(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() # Testing single inputs encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] self.assertEqual(encoded_sequence, filtered_sequence) def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc words, boxes = 
self.get_words_and_boxes() tmpdirname = tempfile.mkdtemp() before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) def test_right_and_left_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() sequence = "Sequence" padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert encoded_sequence + [padding_idx] * padding_size == padded_sequence # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert [padding_idx] * padding_size + encoded_sequence == padded_sequence # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest") padded_sequence_left_length = len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False) padded_sequence_left_length = len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left def test_token_type_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # test 1: single sequence words, boxes = self.get_words_and_boxes() output = tokenizer(words, 
boxes=boxes, return_token_type_ids=True) # Assert that the token type IDs have the same length as the input IDs self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) # Assert that the token type IDs have the same length as the attention mask self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertNotIn(1, output["token_type_ids"]) # test 2: two sequences (question + words) question, words, boxes = self.get_question_words_and_boxes() output = tokenizer(question, words, boxes, return_token_type_ids=True) # Assert that the token type IDs have the same length as the input IDs self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) # Assert that the token type IDs have the same length as the attention mask self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertIn(1, output["token_type_ids"]) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(text))] # No pair tokens_with_offsets = tokenizer_r.encode_plus( text, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) # Pairs text = "what's his name" pair = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(pair))] tokens_with_offsets = tokenizer_r.encode_plus( text, pair, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) @require_torch @require_detectron2 @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") assert ( (model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True ) # Build sequence words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, 
return_tensors="pt") batch_encoded_sequence = tokenizer.batch_encode_plus( [words, words], boxes=[boxes, boxes], return_tensors="pt" ) # We add dummy image keys (as LayoutLMv2 actually also requires a feature extractor # to prepare the image input) encoded_sequence["image"] = torch.randn(1, 3, 224, 224) batch_encoded_sequence["image"] = torch.randn(2, 3, 224, 224) # This should not fail with torch.no_grad(): # saves some time model(**encoded_sequence) model(**batch_encoded_sequence) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() words, boxes = self.get_words_and_boxes() ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True) rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True) self.assertListEqual(ids, rust_ids) def test_tokenization_python_rust_equals(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() # Ensure basic input match input_p = tokenizer_p.encode_plus(words, boxes=boxes) input_r = tokenizer_r.encode_plus(words, boxes=boxes) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key]) input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes) input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key]) words = ["hello" for _ in range(1000)] boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)] # Ensure truncation match input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True) input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key]) # Ensure truncation with stride match input_p = tokenizer_p.encode_plus( words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) input_r = tokenizer_r.encode_plus( words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) for key in filter( lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys() ): self.assertSequenceEqual(input_p[key], input_r[key][0]) def test_embeded_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): 
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() tokens_r = tokenizer_r.encode_plus( words, boxes=boxes, add_special_tokens=True, ) tokens_p = tokenizer_p.encode_plus( words, boxes=boxes, add_special_tokens=True, ) for key in tokens_p.keys(): self.assertEqual(tokens_r[key], tokens_p[key]) if "token_type_ids" in tokens_r: self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_r, tokens_p) def test_compare_add_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) words, boxes = self.get_words_and_boxes() # tokenize() no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False) with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) # encode() no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) # encode_plus() no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens.keys(): self.assertEqual( len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add, ) # # batch_encode_plus words, boxes = self.get_words_and_boxes_batch() no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens.keys(): for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) @slow def test_layoutlmv2_truncation_integration_test(self): words, boxes = self.get_words_and_boxes() tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased", model_max_length=512) for i in range(12, 512): new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True) # Ensure that the input IDs are less than the max length defined. 
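# (When no explicit `max_length` is passed, `truncation=True` is expected to fall back to the
# tokenizer's `model_max_length`; this is exercised just below with `model_max_length = 20`.)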
self.assertLessEqual(len(new_encoded_inputs), i) tokenizer.model_max_length = 20 new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) # Ensure that the input IDs are still truncated when no max_length is specified self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs) self.assertLessEqual(len(new_encoded_inputs), 20) @is_pt_tf_cross_test def test_batch_encode_plus_tensors(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() # A Tensor cannot be built from sequences which are not all the same size self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt") self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf") if tokenizer.pad_token_id is None: self.assertRaises( ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding=True, return_tensors="pt", ) self.assertRaises( ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding="longest", return_tensors="tf", ) else: pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt") tensorflow_tensor = tokenizer.batch_encode_plus( words, boxes=boxes, padding="longest", return_tensors="tf" ) encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True) for key in encoded_sequences.keys(): pytorch_value = pytorch_tensor[key].tolist() tensorflow_value = tensorflow_tensor[key].numpy().tolist() encoded_value = encoded_sequences[key] self.assertEqual(pytorch_value, tensorflow_value, encoded_value) def test_sequence_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method."
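# Rough sketch (hypothetical lengths, assuming a BERT-like checkpoint such as
# layoutlmv2-base-uncased) of the `sequence_ids()` layout checked below for a pair input:
#     [None, 0, 0, ..., 0, None, 1, 1, ..., 1, None]
#      [CLS] first sequence [SEP] second sequence [SEP]
# i.e. special tokens map to None, first- and second-sequence tokens map to 0 and 1 respectively.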
seq_1 = ["With", "these", "inputs."] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))] # We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0.split(), boxes=boxes) self.assertIn(0, output.sequence_ids()) output = tokenizer(seq_0, seq_1, boxes=boxes) self.assertIn(0, output.sequence_ids()) self.assertIn(1, output.sequence_ids()) if tokenizer.num_special_tokens_to_add(pair=True): self.assertIn(None, output.sequence_ids()) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) words = "Hey this is a <special> token".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] r_output = tokenizer_r.encode(words, boxes=boxes) special_token_id = tokenizer_r.encode( ["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False )[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) words = "Hey this is a <special> token".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] p_output = tokenizer_p.encode(words, boxes=boxes) cr_output = tokenizer_cr.encode(words, boxes=boxes) self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training text = [["this", "is", "the"], ["how", "are", "you"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]] inputs = new_tokenizer(text, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is the" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() # Test with a special tokens map class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) # Create a new mapping from the special tokens defined in the original tokenizer special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") special_tokens_map = {} for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" # Train new tokenizer new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) # Check the changes for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) # Check if the AddedToken / string format has been kept for special_token in tokenizer.all_special_tokens_extended: if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) elif isinstance(special_token, AddedToken): # The special token must appear in the list of the new tokenizer as an object of type AddedToken with # the same parameters as the old AddedToken except the content that the user has requested to change. 
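# Hypothetical example of what the comparison below looks for: if the original tokenizer had
#     AddedToken("[CLS]", lstrip=False, rstrip=False, single_word=False)
# and the special tokens map requested "[CLS]" -> "[CLS]a", the new tokenizer is expected to
# contain AddedToken("[CLS]a", ...) with the same lstrip/rstrip/normalized/single_word flags.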
special_token_str = special_token.content new_special_token_str = special_tokens_map[special_token_str] find = False for candidate in new_tokenizer.all_special_tokens_extended: if ( isinstance(candidate, AddedToken) and candidate.content == new_special_token_str and candidate.lstrip == special_token.lstrip and candidate.rstrip == special_token.rstrip and candidate.normalized == special_token.normalized and candidate.single_word == special_token.single_word ): find = True break self.assertTrue( find, f"'{new_special_token_str}' doesn't appear in the list " f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as " f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}", ) elif special_token not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) else: # The special token must appear in the list of the new tokenizer as an object of type string. self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended) # Test we can use the new tokenizer with something not seen during training words = [["this", "is"], ["hello", "🤗"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]] inputs = new_tokenizer(words, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: # only test prepare_for_model for the slow tokenizer if tokenizer.__class__.__name__ == "LayoutLMv2TokenizerFast": continue with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True) input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict) def test_padding_different_model_input_name(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes) input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes) # rename encoded batch to "inputs" input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]] del input_r[tokenizer_r.model_input_names[0]] input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]] del input_p[tokenizer_p.model_input_names[0]] # Renaming `input_ids` to `inputs` tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:] tokenizer_p.model_input_names = ["inputs"] + 
tokenizer_p.model_input_names[1:] input_r = tokenizer_r.pad(input_r, padding="longest") input_p = tokenizer_r.pad(input_p, padding="longest") max_length = len(input_p["inputs"][0]) self.assert_batch_padded_input_match( input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs" ) def test_batch_encode_dynamic_overflowing(self): """ When calling batch_encode with multiple sequences, it can return different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can represented as a tensor """ for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" else: returned_tensor = "jax" # Single example words, boxes = self.get_words_and_boxes() tokens = tokenizer.encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) else: self.assertEqual(len(tokens[key].shape), 3) # Batch of examples # For these 2 examples, 3 training examples will be created words, boxes = self.get_words_and_boxes_batch() tokens = tokenizer.batch_encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) else: self.assertEqual(len(tokens[key].shape), 3) self.assertEqual(tokens[key].shape[-1], 4) @unittest.skip("TO DO: overwrite this very extensive test.") def test_alignement_methods(self): pass def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5): toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list( filter( lambda t: [t[0]] == tokenizer.encode(t[1].split(" "), boxes=len(t[1]) * [[1, 1, 1, 1]], add_special_tokens=False), toks, ) ) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt words = output_txt.split(" ") boxes = [[i, i, i, i] for i in range(len(words))] output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) return words, boxes, output_ids # @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_pair_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, 
model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Build a sequence from our model's vocabulary stride = 2 seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20) question_0 = " ".join(map(str, seq_0)) if len(ids) <= 2 + stride: seq_0 = (seq_0 + " ") * (2 + stride) ids = None seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False) self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride) question_1 = "This is another sentence to be encoded." seq_1 = ["what", "a", "weird", "test", "weirdly", "weird"] boxes_1 = [[i, i, i, i] for i in range(len(seq_1))] seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False) if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2: seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"] seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False) seq_1 = seq_1.split(" ") boxes_1 = [[i, i, i, i] for i in range(len(seq_1))] seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False) self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride) smallest = ( seq1_tokens["input_ids"] if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"]) else seq0_tokens["input_ids"] ) # We are not using the special tokens - a bit too hard to test all the tokenizers with this # TODO try this again later sequence = tokenizer( question_0, seq_1, boxes=boxes_1, add_special_tokens=False ) # , add_prefix_space=False) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_2 = seq_0 * model_max_length question_2 = " ".join(map(str, seq_2)) boxes_2 = boxes_0 * model_max_length self.assertGreater(len(seq_2), model_max_length) sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False) total_length2 = len(sequence2["input_ids"]) self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.") self.assertGreater( total_length2, model_max_length, "Issue with the testing sequence, please update it." 
) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"): output = tokenizer( question_2, seq_1, boxes=boxes_1, padding=padding_state, truncation=truncation_state, ) self.assertEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(output["bbox"]), model_max_length) output = tokenizer( [question_2], [seq_1], boxes=[boxes_1], padding=padding_state, truncation=truncation_state, ) self.assertEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(output["bbox"][0]), model_max_length) # Simple output = tokenizer( question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation="only_second" ) self.assertEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(output["bbox"]), model_max_length) output = tokenizer( [question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation="only_second" ) self.assertEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(output["bbox"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer( question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False ) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertNotEqual(len(output["bbox"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer( [question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False ) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertNotEqual(len(output["bbox"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation truncated_first_sequence = ( tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][:-2] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"] ) truncated_second_sequence = ( tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][:-2] ) truncated_longest_sequence = ( truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence ) overflow_first_sequence = ( tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][-(2 + stride) :] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"] ) overflow_second_sequence = ( tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][-(2 + stride) :] ) overflow_longest_sequence = ( overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence ) bbox_first = [[0, 0, 0, 0]] * (len(seq_0) - 2) 
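# Note (added for clarity): tokens coming from the question, i.e. the text-only first sequence,
# carry a default [0, 0, 0, 0] box, which is why the expected bbox lists for the first sequence
# are built from placeholder boxes here rather than from `boxes_0`.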
bbox_first_sequence = bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"] overflowing_token_bbox_first_sequence_slow = [[0, 0, 0, 0]] * (2 + stride) overflowing_token_bbox_first_sequence_fast = [[0, 0, 0, 0]] * (2 + stride) + tokenizer( seq_1, boxes=boxes_1, add_special_tokens=False )["bbox"] bbox_second = [[0, 0, 0, 0]] * len(seq_0) bbox_second_sequence = ( bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"][:-2] ) overflowing_token_bbox_second_sequence_slow = tokenizer( seq_1, boxes=boxes_1, add_special_tokens=False )["bbox"][-(2 + stride) :] overflowing_token_bbox_second_sequence_fast = [[0, 0, 0, 0]] * len(seq_0) + tokenizer( seq_1, boxes=boxes_1, add_special_tokens=False )["bbox"][-(2 + stride) :] bbox_longest_sequence = ( bbox_first_sequence if len(seq0_tokens) > len(seq1_tokens) else bbox_second_sequence ) overflowing_token_bbox_longest_sequence_fast = ( overflowing_token_bbox_first_sequence_fast if len(seq0_tokens) > len(seq1_tokens) else overflowing_token_bbox_second_sequence_fast ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, LayoutLMv2TokenizerFast): information = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] bbox = information["bbox"][0] overflowing_bbox = information["bbox"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) self.assertEqual(bbox, bbox_longest_sequence) self.assertEqual(len(overflowing_bbox), 2 + stride + len(smallest)) self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast) else: # No overflowing tokens when using 'longest' in python tokenizers with self.assertRaises(ValueError) as context: information = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." 
) ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, LayoutLMv2TokenizerFast): information = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] bbox = information["bbox"][0] overflowing_bbox = information["bbox"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) self.assertEqual(bbox, bbox_longest_sequence) self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast) else: # No overflowing tokens when using 'longest' in python tokenizers with self.assertRaises(ValueError) as context: information = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) ) information_first_truncated = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation="only_first", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, LayoutLMv2TokenizerFast): truncated_sequence = information_first_truncated["input_ids"][0] overflowing_tokens = information_first_truncated["input_ids"][1] bbox = information_first_truncated["bbox"][0] overflowing_bbox = information_first_truncated["bbox"][1] self.assertEqual(len(information_first_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"])) self.assertEqual(overflowing_tokens, overflow_first_sequence) self.assertEqual(bbox, bbox_first_sequence) self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast) else: truncated_sequence = information_first_truncated["input_ids"] overflowing_tokens = information_first_truncated["overflowing_tokens"] overflowing_bbox = information_first_truncated["overflowing_token_boxes"] bbox = information_first_truncated["bbox"] self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :]) self.assertEqual(bbox, bbox_first_sequence) self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow) information_second_truncated = tokenizer( question_0, seq_1, boxes=boxes_1, max_length=len(sequence["input_ids"]) - 2, add_special_tokens=False, stride=stride, truncation="only_second", return_overflowing_tokens=True, # 
add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, LayoutLMv2TokenizerFast): truncated_sequence = information_second_truncated["input_ids"][0] overflowing_tokens = information_second_truncated["input_ids"][1] bbox = information_second_truncated["bbox"][0] overflowing_bbox = information_second_truncated["bbox"][1] self.assertEqual(len(information_second_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"])) self.assertEqual(overflowing_tokens, overflow_second_sequence) self.assertEqual(bbox, bbox_second_sequence) self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast) else: truncated_sequence = information_second_truncated["input_ids"] overflowing_tokens = information_second_truncated["overflowing_tokens"] bbox = information_second_truncated["bbox"] overflowing_bbox = information_second_truncated["overflowing_token_boxes"] self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :]) self.assertEqual(bbox, bbox_second_sequence) self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow) # @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False) total_length = len(sequence["input_ids"]) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length boxes_1 = boxes_0 * model_max_length sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer( seq_1, boxes=boxes_1, padding=padding_state, truncation=truncation_state, ) self.assertEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(output["bbox"]), model_max_length) output = tokenizer( [seq_1], boxes=[boxes_1], padding=padding_state, truncation=truncation_state, ) self.assertEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(output["bbox"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False) 
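# With `truncation=False` the encoding is allowed to exceed `model_max_length`; the only expected
# side effect is the single "Token indices sequence length is longer than ..." warning captured
# by `cm` and checked right below.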
self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertNotEqual(len(output["bbox"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertNotEqual(len(output["bbox"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation stride = 2 information = tokenizer( seq_0, boxes=boxes_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, LayoutLMv2TokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] bbox = information["bbox"][0] overflowing_bbox = information["bbox"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence["input_ids"][:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :]) self.assertEqual(bbox, sequence["bbox"][:-2]) self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] bbox = information["bbox"] overflowing_bbox = information["overflowing_token_boxes"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence["input_ids"][:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :]) self.assertEqual(bbox, sequence["bbox"][:-2]) self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :]) @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.") def test_pretokenized_inputs(self): pass @unittest.skip("LayoutLMv2 tokenizer always expects pretokenized inputs.") def test_compare_pretokenized_inputs(self): pass @unittest.skip("LayoutLMv2 fast tokenizer does not support prepare_for_model") def test_compare_prepare_for_model(self): pass @slow def test_only_label_first_subword(self): words = ["hello", "niels"] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] word_labels = [0, 1] # test slow tokenizer tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased") encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100]) tokenizer_p = LayoutLMv2Tokenizer.from_pretrained( "microsoft/layoutlmv2-base-uncased", only_label_first_subword=False ) encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100]) # test fast tokenizer tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") encoding = 
tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100]) tokenizer_r = LayoutLMv2Tokenizer.from_pretrained( "microsoft/layoutlmv2-base-uncased", only_label_first_subword=False ) encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100]) @slow def test_layoutlmv2_integration_test(self): tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased") tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") # There are 3 cases: # CASE 1: document image classification (training + inference), document image token classification (inference), # in which case only words and normalized bounding boxes are provided to the tokenizer # CASE 2: document image token classification (training), # in which case one also provides word labels to the tokenizer # CASE 3: document image visual question answering (inference), # in which case one also provides a question to the tokenizer # We need to test all 3 cases both on batched and non-batched inputs. # CASE 1: not batched words, boxes = self.get_words_and_boxes() # fmt: off expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 1: batched words, boxes = self.get_words_and_boxes_batch() # fmt: off expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) 
self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 2: not batched words, boxes = self.get_words_and_boxes() word_labels = [1, 2, 3] # fmt: off expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 2: batched words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] # fmt: off expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 3: not batched question, words, boxes = self.get_question_words_and_boxes() # fmt: off expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 
272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 3: batched questions, words, boxes = self.get_question_words_and_boxes_batch() # fmt: off expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # noqa: E231 # fmt: on encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20) encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_p), expected_results) self.assertDictEqual(dict(encoding_r), expected_results) @unittest.skip("Doesn't support another framework than PyTorch") def test_np_encode_plus_sent_to_model(self): pass
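The three call patterns exercised by the integration test above can be condensed into a short usage sketch. This is illustrative only: it assumes the "microsoft/layoutlmv2-base-uncased" checkpoint is reachable, and the words, boxes and labels are made-up values rather than data from the tests.

from transformers import LayoutLMv2TokenizerFast

tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")

words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # one box per word, normalized to the 0-1000 coordinate space

# Case 1: words + boxes (document image classification / token classification inference)
encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=20)

# Case 2: words + boxes + word-level labels (token classification training);
# special tokens and non-leading subwords receive the ignore index -100
encoding = tokenizer(words, boxes=boxes, word_labels=[0, 1])
print(encoding["labels"])

# Case 3: question + words + boxes (document visual question answering)
encoding = tokenizer("What's his name?", words, boxes=boxes)
print(tokenizer.decode(encoding["input_ids"]))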
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMv2ImageProcessor class LayoutLMv2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.apply_ocr = apply_ocr def prepare_image_processor_dict(self): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class LayoutLMv2ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None def setUp(self): self.image_processor_tester = LayoutLMv2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoding = image_processing(image_inputs[0], return_tensors="pt") self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertIsInstance(encoding.words, list) self.assertIsInstance(encoding.boxes, list) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values 
self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_layoutlmv2_integration_test(self): # with apply_OCR = True image_processing = LayoutLMv2ImageProcessor() from datasets import load_dataset ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test") image = Image.open(ds[0]["file"]).convert("RGB") encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) self.assertEqual(len(encoding.words), len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 
'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 
597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, expected_words) self.assertListEqual(encoding.boxes, expected_boxes) # with apply_OCR = False image_processing = LayoutLMv2ImageProcessor(apply_ocr=False) encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
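As a rough companion to the integration test above, the sketch below shows the two image-processor modes it covers. It assumes pytesseract and the Tesseract binary are installed for the OCR path, and "document.png" is a placeholder path, not a file shipped with the tests.

from PIL import Image
from transformers import LayoutLMv2ImageProcessor

image = Image.open("document.png").convert("RGB")  # placeholder document image

# apply_ocr=True (the default): resized pixel values plus Tesseract words and 0-1000 boxes
image_processor = LayoutLMv2ImageProcessor()
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                     # torch.Size([1, 3, 224, 224])
print(len(encoding.words[0]), len(encoding.boxes[0]))  # one word list and one box list per image

# apply_ocr=False: only the resized pixel values are returned
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)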
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest from typing import List import numpy as np from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast from transformers.models.layoutlmv2 import LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor @require_pytesseract @require_tokenizers class LayoutLMv2ProcessorTest(unittest.TestCase): tokenizer_class = LayoutLMv2Tokenizer rust_tokenizer_class = LayoutLMv2TokenizerFast def setUp(self): vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] image_processor_map = { "do_resize": True, "size": 224, "apply_ocr": True, } self.tmpdirname = tempfile.mkdtemp() self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) self.image_processing_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.image_processing_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(image_processor_map) + "\n") def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]: return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)] def get_image_processor(self, **kwargs): return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): image_processor = self.get_image_processor() tokenizers = self.get_tokenizers() for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname) processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) def test_save_load_pretrained_additional_features(self): processor = LayoutLMv2Processor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer()) processor.save_pretrained(self.tmpdirname) # slow tokenizer tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30) processor = LayoutLMv2Processor.from_pretrained( self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) # fast tokenizer tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30) processor = LayoutLMv2Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = LayoutLMv2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() # add extra args inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False) self.assertListEqual(list(inputs.keys()), processor.model_input_names) @slow def test_overflowing_tokens(self): # In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences). 
from datasets import load_dataset # set up datasets = load_dataset("nielsr/funsd") processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") def preprocess_data(examples): images = [Image.open(path).convert("RGB") for path in examples["image_path"]] words = examples["words"] boxes = examples["bboxes"] word_labels = examples["ner_tags"] encoded_inputs = processor( images, words, boxes=boxes, word_labels=word_labels, padding="max_length", truncation=True, return_overflowing_tokens=True, stride=50, return_offsets_mapping=True, return_tensors="pt", ) return encoded_inputs train_data = preprocess_data(datasets["train"]) self.assertEqual(len(train_data["image"]), len(train_data["input_ids"])) # different use cases tests @require_torch @require_pytesseract class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase): @cached_property def get_images(self): # we verify our implementation on 2 document images from the DocVQA dataset from datasets import load_dataset ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test") image_1 = Image.open(ds[0]["file"]).convert("RGB") image_2 = Image.open(ds[1]["file"]).convert("RGB") return image_1, image_2 @cached_property def get_tokenizers(self): slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased") fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") return [slow_tokenizer, fast_tokenizer] @slow def test_processor_case_1(self): # case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True image_processor = LayoutLMv2ImageProcessor() tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) # not batched input_image_proc = image_processor(images[0], return_tensors="pt") input_processor = processor(images[0], return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify image self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2) # verify input_ids # this was obtained with Tesseract 4.1.1 # fmt: off expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. 
jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231 # fmt: on decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) # batched input_image_proc = image_processor(images, return_tensors="pt") input_processor = processor(images, padding=True, return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify images self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2) # verify input_ids # this was obtained with Tesseract 4.1.1 # fmt: off expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # noqa: E231 # fmt: on decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) @slow def test_processor_case_2(self): # case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) # not batched words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt") # verify keys expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"] actual_keys = list(input_processor.keys()) for key in expected_keys: self.assertIn(key, actual_keys) # verify input_ids expected_decoding = "[CLS] hello world [SEP]" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) # batched words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt") # verify keys expected_keys = 
["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]" decoding = processor.decode(input_processor.input_ids[0].tolist()) self.assertSequenceEqual(decoding, expected_decoding) # verify bbox expected_bbox = [ [0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000], ] self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow def test_processor_case_3(self): # case 3: token classification (training), apply_ocr=False image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) # not batched words = ["weirdly", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] word_labels = [1, 2] input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids expected_decoding = "[CLS] weirdly world [SEP]" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) # verify labels expected_labels = [-100, 1, -100, 2, -100] self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels) # batched words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] word_labels = [[1, 2], [6, 3, 10, 2]] input_processor = processor( images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt" ) # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids expected_decoding = "[CLS] my name is niels [SEP]" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) # verify bbox expected_bbox = [ [0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000], ] self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) # verify labels expected_labels = [-100, 6, 3, 10, 2, -100, -100] self.assertListEqual(input_processor.labels[1].tolist(), expected_labels) @slow def test_processor_case_4(self): # case 4: visual question answering (inference), apply_ocr=True image_processor = LayoutLMv2ImageProcessor() tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) # not batched question = "What's his name?" input_processor = processor(images[0], question, return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids # this was obtained with Tesseract 4.1.1 # fmt: off expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 
12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231 # fmt: on decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) # batched questions = ["How old is he?", "what's the time"] input_processor = processor( images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt" ) # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids # this was obtained with Tesseract 4.1.1 expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) # verify bbox # fmt: off expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # noqa: E231 # fmt: on self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox) @slow def test_processor_case_5(self): # case 5: visual question answering (inference), apply_ocr=False image_processor = LayoutLMv2ImageProcessor(apply_ocr=False) tokenizers = self.get_tokenizers images = self.get_images for tokenizer in tokenizers: processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer) # not batched question = "What's his name?" words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] input_processor = processor(images[0], question, words, boxes, return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids expected_decoding = "[CLS] what's his name? 
[SEP] hello world [SEP]" decoding = processor.decode(input_processor.input_ids.squeeze().tolist()) self.assertSequenceEqual(decoding, expected_decoding) # batched questions = ["How old is he?", "what's the time"] words = [["hello", "world"], ["my", "name", "is", "niels"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]] input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt") # verify keys expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"] actual_keys = sorted(input_processor.keys()) self.assertListEqual(actual_keys, expected_keys) # verify input_ids expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]" decoding = processor.decode(input_processor.input_ids[0].tolist()) self.assertSequenceEqual(decoding, expected_decoding) expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]" decoding = processor.decode(input_processor.input_ids[1].tolist()) self.assertSequenceEqual(decoding, expected_decoding) # verify bbox expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]] self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
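The five processor cases verified above reduce to two call shapes, sketched below. The checkpoint name is taken from the tests, while "document.png" and the example words, boxes and labels are placeholders.

from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor, LayoutLMv2TokenizerFast

tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
image = Image.open("document.png").convert("RGB")  # placeholder document image

# Cases 1 and 4: the image processor runs OCR itself, optionally alongside a question
processor = LayoutLMv2Processor(image_processor=LayoutLMv2ImageProcessor(), tokenizer=tokenizer)
encoding = processor(image, return_tensors="pt")
encoding = processor(image, "What's his name?", return_tensors="pt")

# Cases 2, 3 and 5: OCR is disabled and the caller supplies words/boxes,
# optionally with word_labels (training) or a question (VQA)
processor = LayoutLMv2Processor(
    image_processor=LayoutLMv2ImageProcessor(apply_ocr=False), tokenizer=tokenizer
)
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
encoding = processor(image, words, boxes=boxes, word_labels=[1, 2], return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, bbox, image, input_ids, labels, token_type_ids

The no-OCR path exists for datasets that already ship word-level annotations, which is why test_overflowing_tokens above feeds FUNSD words and bboxes through a processor loaded with the "no_ocr" revision.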
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/vit_msn/test_modeling_vit_msn.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViTMSN model. """ import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTMSNModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMSNConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTMSNModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTMSNForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}") print(f"Labels: {labels}") self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = ViTMSNForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMSN does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTMSNModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTMSNModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class
ViTMSNModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): torch.manual_seed(2) model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
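For reference, the integration test above corresponds roughly to the following inference sketch. "cats.png" is a placeholder image path; note that the test seeds the RNG before loading, which suggests the classification head of "facebook/vit-msn-small" is randomly initialized, so the argmax index should not be read as a meaningful ImageNet label.

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

image_processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
model.eval()

image = Image.open("cats.png")  # placeholder input image
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

print(logits.shape)  # torch.Size([1, 1000])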
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/falcon/test_modeling_falcon.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Falcon model. """ import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class FalconModelTester: def __init__( self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return FalconConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = FalconModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = FalconModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = FalconForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) 
# test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: config.alibi = alibi self.model_tester.create_and_check_model(config, *inputs) def test_falcon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_falcon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_cache_conversions(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = input_dict["input_ids"] model = FalconForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, use_cache=True) batch_size = input_ids.shape[0] rw_cache = model._convert_to_rw_cache(result.past_key_values) standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size) for layer in range(len(rw_cache)): 
for tensor_idx in range(2): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]) ) def test_falcon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = FalconForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_past_key_values_format(self): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): return model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) batch_size, seq_length = inputs["input_ids"].shape for i in range(num_hidden_layers): if config.new_decoder_architecture: num_attention_heads = config.num_attention_heads elif config.multi_query: num_attention_heads = 1 self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class FalconLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_falcon(self): tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b") model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b") model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) EXPECTED_OUTPUT = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." 
) output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19) output_str = tokenizer.batch_decode(output_ids)[0] self.assertEqual(output_str, EXPECTED_OUTPUT) @slow def test_lm_generation_big_models(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**inputs, do_sample=False, max_new_tokens=4) model.generate(**inputs, do_sample=True, max_new_tokens=4) model.generate(**inputs, num_beams=2, max_new_tokens=4) @slow def test_lm_generation_use_cache(self): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: tokenizer = AutoTokenizer.from_pretrained(repo) model = FalconForCausalLM.from_pretrained(repo) model.eval() model.to(device=torch_device) inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device) # Test results are the same with and without cache outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False) outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
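# --- Illustrative sketch (not part of the original test file above) ---
# test_cache_conversions and test_past_key_values_format rely on Falcon's "RW" cache being 3-D
# (batch and head dimensions fused into one leading axis) while the standard Transformers cache is
# 4-D (batch and heads kept separate). The shapes below are hypothetical and chosen only to make
# the round trip easy to follow; this is not the model's internal conversion code.
import torch

batch_size, num_kv_heads, seq_length, head_dim = 2, 4, 5, 8

# Standard-format key: [batch, num_heads, seq, head_dim] -> ndim == 4, as asserted in the test
standard_key = torch.randn(batch_size, num_kv_heads, seq_length, head_dim)

# RW-format key: [batch * num_heads, seq, head_dim] -> ndim == 3, as asserted in the test
rw_key = standard_key.reshape(batch_size * num_kv_heads, seq_length, head_dim)

# Converting back should be lossless, which is what the element-wise equality check verifies
roundtrip_key = rw_key.reshape(batch_size, num_kv_heads, seq_length, head_dim)
assert standard_key.ndim == 4 and rw_key.ndim == 3
assert torch.equal(standard_key, roundtrip_key)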
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/roformer/test_modeling_flax_roformer.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class FlaxRoFormerModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = 
{"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxRoFormerModelTester(self) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True) outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs) @require_flax class FlaxRoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = jnp.array([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] vocab_size = 50000 expected_shape = (1, 6, vocab_size) self.assertEqual(output.shape, expected_shape) expected_slice = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/roformer/test_modeling_roformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch RoFormer model. """ import unittest from transformers import RoFormerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.roformer.modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerSelfAttention, RoFormerSinusoidalPositionalEmbedding, ) class RoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels def get_config(self): return RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoFormerModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_generate_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoFormerForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=15, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=15, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
RoFormerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RoFormerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RoFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RoFormerModel, RoFormerForMaskedLM, RoFormerForCausalLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (RoFormerForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": RoFormerModel, "fill-mask": RoFormerForMaskedLM, "question-answering": RoFormerForQuestionAnswering, "text-classification": RoFormerForSequenceClassification, "text-generation": RoFormerForCausalLM, "token-classification": RoFormerForTokenClassification, "zero-shot": RoFormerForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = RoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_generate_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_generate_causal_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def 
test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): for model_name in ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = RoFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class RoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above. expected_slice = torch.tensor( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @require_torch class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(torch_device) emb = emb1(input_ids.shape) desired_weights = torch.tensor( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ).to(torch_device) self.assertTrue( torch.allclose(emb, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_positional_emb_weights_against_roformer(self): desired_weights = torch.tensor( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ).to(torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512).to(torch_device) weights = emb1.weight.data[:3, :5].to(torch_device) self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) @require_torch class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = ( torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) key_layer = ( -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64).to(torch_device) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = 
RoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = torch.tensor( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ).to(torch_device) desired_key_layer = torch.tensor( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ).to(torch_device) self.assertTrue( torch.allclose(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_query_layer}\ngot:\n{query_layer}\n", ) self.assertTrue( torch.allclose(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_key_layer}\ngot:\n{key_layer}\n", )
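# --- Illustrative sketch (hedged; not copied from modeling_roformer.py) ---
# The rotary-embedding test above checks that queries and keys are rotated pairwise by
# position-dependent angles. The gist, for interleaved (even, odd) feature pairs, is
# q' = q * cos + rotate_every_two(q) * sin. The shapes and values below are made up; only the
# sin/cos construction is meant to mirror the sinusoidal table used in the test.
import torch

def rotate_every_two(x):
    # (..., [x0, x1, x2, x3, ...]) -> (..., [-x1, x0, -x3, x2, ...])
    return torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x)

seq_len, dim = 16, 64
positions = torch.arange(seq_len, dtype=torch.float)[:, None]                   # (seq, 1)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))  # (dim/2,)
angles = positions * inv_freq                                                   # (seq, dim/2)
sin = angles.sin().repeat_interleave(2, dim=-1)                                 # (seq, dim)
cos = angles.cos().repeat_interleave(2, dim=-1)                                 # (seq, dim)

query = torch.randn(2, 12, seq_len, dim)   # (batch, heads, seq, head_dim), arbitrary values
rotated_query = query * cos + rotate_every_two(query) * sin
assert rotated_query.shape == query.shape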
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/roformer/test_modeling_tf_roformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class TFRoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRoFormerForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRoFormerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) 
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: add `prepare_inputs_for_generation` for `TFRoFormerForCausalLM` def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = TFRoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base") self.assertIsNotNone(model) @require_tf class TFRoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = [1, 6, vocab_size] self.assertEqual(output.shape, 
expected_shape) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. expected_slice = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) @require_tf class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = tf.constant([[4, 10]]) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6) emb = emb1(input_ids.shape) desired_weights = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance) def test_positional_emb_weights_against_roformer(self): desired_weights = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512) emb1([2, 16, 512]) weights = emb1.weight[:3, :5] tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance) @require_tf class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) desired_key_layer = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance) tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
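# --- Hedged sketch: where the sinusoidal constants above come from ---
# The `desired_weights` in the positional-embedding tests are consistent with a table whose first
# half holds sin(pos * w_i) and second half cos(pos * w_i), with w_i = 1 / 10000**(2i / dim). This
# is only an illustration of those numbers, not the library implementation itself.
import numpy as np

def sinusoidal_table(num_positions, dim):
    positions = np.arange(num_positions, dtype=np.float32)[:, None]      # (num_positions, 1)
    inv_freq = 1.0 / np.power(10000.0, 2.0 * np.arange(dim // 2) / dim)  # (dim // 2,)
    angles = positions * inv_freq                                        # (num_positions, dim // 2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)     # (num_positions, dim)

table = sinusoidal_table(num_positions=6, dim=6)
# First two rows ~= [[0, 0, 0, 1, 1, 1], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]],
# matching the expected values asserted in test_basic above.
print(np.round(table[:2], 4))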
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/roformer/test_tokenization_roformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = RoFormerTokenizer rust_tokenizer_class = RoFormerTokenizerFast space_between_special_tokens = True test_rust_tokenizer = True def setUp(self): super().setUp() def get_tokenizer(self, **kwargs): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs) def get_rust_tokenizer(self, **kwargs): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs) def get_chinese_input_output_texts(self): input_text = "永和服装饰品有限公司,今天天气非常好" output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好" return input_text, output_text def test_tokenizer(self): tokenizer = self.get_tokenizer() input_text, output_text = self.get_chinese_input_output_texts() tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, output_text.split()) input_tokens = tokens + [tokenizer.unk_token] exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens) def test_rust_tokenizer(self): tokenizer = self.get_rust_tokenizer() input_text, output_text = self.get_chinese_input_output_texts() tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, output_text.split()) input_tokens = tokens + [tokenizer.unk_token] exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens) # can't train new_tokenizer via Tokenizers lib def test_training_new_tokenizer(self): pass # can't train new_tokenizer via Tokenizers lib def test_training_new_tokenizer_with_special_tokens_change(self): pass # can't serialise custom PreTokenizer def test_save_slow_from_fast_and_reload_fast(self): pass
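# --- Hedged usage sketch mirroring the tokenizer test above ---
# Requires rjieba and network access to fetch the pretrained vocab; it shows the word-level
# pre-tokenization that the expected token ids in the test encode.
from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
input_text = "永和服装饰品有限公司,今天天气非常好"
tokens = tokenizer.tokenize(input_text)
# Per the test, `tokens` should equal the whitespace-split `output_text` defined there.
ids = tokenizer.convert_tokens_to_ids(tokens + [tokenizer.unk_token])
print(tokens, ids)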
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/splinter/test_modeling_splinter.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Splinter model. """ import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": SplinterModel, "question-answering": SplinterForQuestionAnswering} if is_torch_available() else {} ) # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "QAPipelineTests": return True elif pipeline_test_casse_name == "FeatureExtractionPipelineTests" and tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if 
issubclass(model_class, SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): # question_positions must not be None. model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output. 
@require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: # Skip this case since it will fail sometimes, as described above. if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]" # Output should be the span "the United Kingdom" input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . 
[SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) # Note that the original code uses 0 to denote padded question tokens # and their start and end positions. As the pad_token_id of the model's # config is used for the losse's ignore_index in SplinterForPreTraining, # we add this test to ensure anybody making changes to the default # value of the config, will be aware of the implication. self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
0
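For reference, the behaviour exercised by the Splinter pretraining integration tests can be reproduced with a short standalone script. The sketch below is illustrative only (it is not part of the test suite); it reuses the hard-coded token ids and question positions from the tests above, and running it assumes the "tau/splinter-base-qass" checkpoint can be downloaded.

# Minimal sketch: span prediction with SplinterForPreTraining, reusing the
# integration-test inputs. Not part of the test suite.
import torch

from transformers import SplinterForPreTraining

model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
model.eval()

# "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
input_ids = torch.tensor(
    [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
# Indices of the two [QUESTION] tokens (id 104) in the sequence.
question_positions = torch.tensor([[1, 5]], dtype=torch.long)

with torch.no_grad():
    outputs = model(input_ids, question_positions=question_positions)

# Logits have shape (batch_size, num_questions, seq_length); argmax over the
# last dimension gives one predicted span per question token.
start = outputs.start_logits.argmax(dim=-1)  # expected: tensor([[ 7, 10]])
end = outputs.end_logits.argmax(dim=-1)      # expected: tensor([[ 7, 12]])
print(start, end)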
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/cpm/test_tokenization_cpm.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers.models.cpm.tokenization_cpm import CpmTokenizer from transformers.testing_utils import custom_tokenizers from ..xlnet.test_modeling_xlnet import XLNetModelTest @custom_tokenizers class CpmTokenizationTest(XLNetModelTest): # There is no `CpmModel` def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def test_pre_tokenization(self): tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate") text = "Hugging Face大法好,谁用谁知道。" normalized_text = "Hugging Face大法好,谁用谁知道。<unk>" bpe_tokens = "▁Hu gg ing ▁ ▂ ▁F ace ▁大法 ▁好 ▁ , ▁谁 ▁用 ▁谁 ▁知 道 ▁ 。".split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63, 8, 9, 440, 108, 440, 121, 90, 8, 12, 0] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) reconstructed_text = tokenizer.decode(input_bpe_tokens) self.assertEqual(reconstructed_text, normalized_text)
0
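The pre-tokenization check above can also be run as a small interactive round-trip. The snippet below is a sketch rather than a test; it assumes the "TsinghuaAI/CPM-Generate" tokenizer files can be downloaded (and that the external word-segmentation dependency CPM's tokenizer relies on, jieba, is installed).

# Illustrative CpmTokenizer round-trip, mirroring test_pre_tokenization above.
from transformers.models.cpm.tokenization_cpm import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")

text = "Hugging Face大法好,谁用谁知道。"
tokens = tokenizer.tokenize(text)  # pre-segmented text split into SentencePiece pieces
ids = tokenizer.convert_tokens_to_ids(tokens + [tokenizer.unk_token])

print(tokens)
print(tokenizer.decode(ids))  # normalized text, ending with "<unk>"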
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/plbart/test_tokenization_plbart.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right EN_CODE = 50003 PYTHON_CODE = 50002 @require_sentencepiece @require_tokenizers class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = PLBartTokenizer rust_tokenizer_class = None test_rust_tokenizer = False def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_base_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)] self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"]) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) def test_full_multi_tokenizer(self): tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", 
"est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) end = tokenizer.vocab_size language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)] self.assertListEqual( language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" input_ids = tokenizer(code).input_ids self.assertEqual( tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code, ) @require_torch @require_sentencepiece @require_tokenizers class PLBartPythonEnIntegrationTest(unittest.TestCase): checkpoint_name = "uclanlp/plbart-python-en_XX" src_text = [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] tgt_text = [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] expected_src_tokens = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] @classmethod def setUpClass(cls): cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003) def test_python_en_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_python_en_tokenizer_decode_ignores_language_codes(self): self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids) generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_english) self.assertNotIn(self.tokenizer.eos_token, result) def test_python_en_tokenizer_truncation(self): src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] 
self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], PYTHON_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001]) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(tmpdirname) new_tok = PLBartTokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt") batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE]) self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE) self.assertEqual(batch.decoder_input_ids[1][-1], 2) self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE]) @require_torch def test_python_en_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 26), batch.input_ids.shape) self.assertEqual((2, 26), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50001, }, )
0
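The fairseq-parity and prepare-batch tests above boil down to one tokenizer call plus a shift of the labels. The following sketch (not part of the test suite) shows that flow for a single code-to-text pair; it assumes the "uclanlp/plbart-python-en_XX" checkpoint is reachable.

# Minimal seq2seq batch preparation with PLBartTokenizer.
from transformers import PLBartTokenizer
from transformers.models.plbart.modeling_plbart import shift_tokens_right

tokenizer = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", language_codes="base", src_lang="python", tgt_lang="en_XX"
)

src = ["def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"]
tgt = ["Returns the maximum value of a b c."]

batch = tokenizer(src, text_target=tgt, padding=True, return_tensors="pt")
# Source ids end with [eos, __python__]; labels end with [eos, __en_XX__].
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)

print(batch.input_ids[0, -2:])        # tensor([    2, 50002])
print(batch.labels[0, -2:])           # tensor([    2, 50003])
print(batch.decoder_input_ids[0, 0])  # tensor(50003), i.e. __en_XX__ as decoder start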
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/plbart/test_modeling_plbart.py
# coding=utf-8 # Copyright 2022, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch PLBART model. """ import copy import tempfile import unittest from transformers import PLBartConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, ) from transformers.models.plbart.modeling_plbart import PLBartDecoder, PLBartEncoder def prepare_plbart_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class PLBartModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, 
self.seq_length], self.vocab_size) input_ids = input_ids.clamp(3) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_plbart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return PLBartConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = PLBartModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_with_past_key_values = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values ) output_from_past = output_with_past_key_values["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = PLBartModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = PLBartEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = PLBartDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], 
attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class PLBartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (PLBartModel, PLBartForConditionalGeneration, PLBartForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = (PLBartForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": PLBartForConditionalGeneration, "feature-extraction": PLBartModel, "summarization": PLBartForConditionalGeneration, "text-classification": PLBartForSequenceClassification, "text-generation": PLBartForCausalLM, "text2text-generation": PLBartForConditionalGeneration, "translation": PLBartForConditionalGeneration, "zero-shot": PLBartForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False # Fix me Michael test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `PLBartConfig` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def setUp(self): self.model_tester = PLBartModelTester(self) self.config_tester = ConfigTester(self, config_class=PLBartConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # PLBartForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (PLBartModel, PLBartForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() 
input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PLBartForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @require_sentencepiece @require_tokenizers class AbstractSeq2SeqIntegrationTest(unittest.TestCase): maxDiff = 1000 # longer string compare tracebacks checkpoint_name = None @classmethod def setUpClass(cls): cls.tokenizer = AutoTokenizer.from_pretrained(cls.checkpoint_name, use_fast=False) return cls @cached_property def model(self): """Only load the model if needed.""" model = PLBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device) if "cuda" in torch_device: model = model.half() return model @require_torch @require_sentencepiece @require_tokenizers class PLBartJavaCsIntegrationTest(AbstractSeq2SeqIntegrationTest): checkpoint_name = "uclanlp/plbart-java-cs" src_text = [ "public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}", "public int product(int a, int b, int c){return a*b*c;}", ] tgt_text = [ "public int maximum(int a, int b, int c){return Math.Max(", "public int Product(int a, int b, int c){return a * b *", ] @slow def test_java_cs_generate_one(self): batch = self.tokenizer( ["public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}"], return_tensors="pt" ) batch = batch.to(torch_device) translated_tokens = self.model.generate(**batch) decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True) self.assertEqual(self.tgt_text[0], decoded[0]) # self.assertEqual(self.tgt_text[1], decoded[1]) @slow def test_java_cs_generate_batch(self): batch = self.tokenizer(self.src_text, return_tensors="pt", padding=True, truncation=True) batch = batch.to(torch_device) translated_tokens = self.model.generate(**batch) decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True) assert self.tgt_text == decoded def test_plbart_java_cs_config(self): plbart_models = ["uclanlp/plbart-java-cs"] expected = {"scale_embedding": True} for name in plbart_models: config = PLBartConfig.from_pretrained(name) for k, v in expected.items(): try: self.assertEqual(v, getattr(config, k)) except AssertionError as e: e.args += (name, k) raise def test_plbart_fast_forward(self): config = PLBartConfig( vocab_size=99, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, add_final_layer_norm=True, ) lm_model = PLBartForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 
2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(result.logits.shape, expected_shape) @require_torch @require_sentencepiece @require_tokenizers class PLBartBaseIntegrationTest(AbstractSeq2SeqIntegrationTest): checkpoint_name = "uclanlp/plbart-base" src_text = ["Is 0 the first Fibonacci number ?", "Find the sum of all prime numbers ."] tgt_text = ["0 the first Fibonacci number?", "the sum of all prime numbers.......... the the"] def test_base_generate(self): inputs = self.tokenizer([self.src_text[0]], return_tensors="pt").to(torch_device) src_lan = self.tokenizer._convert_lang_code_special_format("en_XX") translated_tokens = self.model.generate( input_ids=inputs["input_ids"].to(torch_device), decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan], ) decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True) self.assertEqual(self.tgt_text[0], decoded[0]) @slow def test_fill_mask(self): inputs = self.tokenizer(["Is 0 the <mask> Fibonacci <mask> ?"], return_tensors="pt").to(torch_device) src_lan = self.tokenizer._convert_lang_code_special_format("en_XX") outputs = self.model.generate( inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan], num_beams=1 ) prediction: str = self.tokenizer.batch_decode( outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True )[0] self.assertEqual(prediction, "0 0 the 0 the 0 the 0 the 0 the 0 the 0 the 0 the") class PLBartStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = PLBartConfig( vocab_size=self.vocab_size, d_model=self.d_model, 
decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return (config, input_ids, attention_mask, lm_labels) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = PLBartDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = PLBartDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, 
output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, attention_mask, lm_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class PLBartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (PLBartDecoder, PLBartForCausalLM) if is_torch_available() else () all_generative_model_classes = (PLBartForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp(self): self.model_tester = PLBartStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=PLBartConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
0
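Outside the test harness, the "fast forward" case above can be exercised with a tiny randomly initialized model. The sketch below is illustrative only; it runs on CPU with no pretrained checkpoint, using the same toy configuration and token ids as test_plbart_fast_forward.

# Tiny PLBart forward pass with labels, mirroring test_plbart_fast_forward.
import torch

from transformers import PLBartConfig, PLBartForConditionalGeneration

config = PLBartConfig(
    vocab_size=99,
    d_model=24,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=2,
    decoder_attention_heads=2,
    encoder_ffn_dim=32,
    decoder_ffn_dim=32,
    max_position_embeddings=48,
    add_final_layer_norm=True,
)
model = PLBartForConditionalGeneration(config).eval()

context = torch.tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=torch.long)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=torch.long)

with torch.no_grad():
    out = model(input_ids=context, decoder_input_ids=summary, labels=summary)

# Logits cover every decoder position over the full vocabulary.
assert out.logits.shape == (*summary.shape, config.vocab_size)
print(out.loss)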
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bridgetower/test_image_processing_bridgetower.py
# coding=utf-8 # Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class BridgeTowerImageProcessingTester(unittest.TestCase): def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], do_pad: bool = True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3, ): self.parent = parent self.do_resize = do_resize self.size = size if size is not None else {"shortest_edge": 288} self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_center_crop = do_center_crop self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to BridgeTowerImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: size = self.size["shortest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = BridgeTowerImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor")) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image processor image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image processor image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): # Initialize image processor image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
0
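The size arithmetic that get_expected_values implements above (shortest-edge resize, longest-edge cap, then flooring both sides to a multiple of size_divisor) can be written as a standalone helper. The function below is a sketch for illustration only; the name expected_bridgetower_size is hypothetical and not part of the library or the test suite.

# Expected output size of BridgeTower-style resizing, as computed by the tester above.
def expected_bridgetower_size(height, width, size=288, size_divisor=32):
    # Scale so the shortest edge equals `size`.
    scale = size / min(height, width)
    if height < width:
        newh, neww = size, scale * width
    else:
        newh, neww = scale * height, size

    # Cap the longest edge, matching the 1333/800 ratio used by the tester.
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale

    # Round, then floor both sides to a multiple of size_divisor.
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor


print(expected_bridgetower_size(480, 640))  # (288, 384)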
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bridgetower/test_modeling_bridgetower.py
# coding=utf-8 # Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BridgeTower model. """ import tempfile import unittest import numpy as np from transformers import ( BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, ) from transformers.models.bridgetower.modeling_bridgetower import BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BridgeTowerProcessor class BridgeTowerTextModelTester: def __init__( self, parent, hidden_act="gelu", hidden_size=64, initializer_factor=1, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, intermediate_size=128, tie_word_embeddings=False, output_hidden_states=False, ): self.parent = parent self.hidden_act = hidden_act self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.tie_word_embeddings = tie_word_embeddings self.vocab_size = 99 self.seq_length = 4 self.batch_size = 1 self.is_training = False self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_ids, attention_mask def get_config(self): return BridgeTowerTextConfig( hidden_act=self.hidden_act, hidden_size=self.hidden_size, initializer_factor=self.initializer_factor, layer_norm_eps=self.layer_norm_eps, num_attention_heads=self.num_attention_heads, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, tie_word_embeddings=self.tie_word_embeddings, output_hidden_states=self.output_hidden_states, vocab_size=self.vocab_size, ) class BridgeTowerImageModelTester: def __init__( self, parent, hidden_size=64, initializer_factor=1, layer_norm_eps=1e-05, num_hidden_layers=2, init_layernorm_from_vision_encoder=False, output_hidden_states=False, image_size=64, ): self.parent = parent self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.num_hidden_layers = num_hidden_layers 
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        self.num_channels = 3
        self.num_image_features = 17
        self.batch_size = 1
        self.image_size = image_size
        self.is_training = False
        self.output_hidden_states = output_hidden_states

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        pixel_mask = random_attention_mask([self.batch_size, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values, pixel_mask

    def get_config(self):
        return BridgeTowerVisionConfig(
            hidden_size=self.hidden_size,
            initializer_factor=self.initializer_factor,
            layer_norm_eps=self.layer_norm_eps,
            num_hidden_layers=self.num_hidden_layers,
            init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder,
            num_channels=self.num_channels,
            num_image_features=self.num_image_features,
            batch_size=self.batch_size,
            image_size=self.image_size,
            is_training=self.is_training,
            output_hidden_states=self.output_hidden_states,
        )


class BridgeTowerModelTester:
    def __init__(
        self,
        parent,
        text_kwargs=None,
        vision_kwargs=None,
        share_cross_modal_transformer_layers=True,
        share_link_tower_layers=False,
        link_tower_type="add",
        init_layernorm_from_vision_encoder=False,
        contrastive_hidden_size=512,
        logit_scale_init_value=2.6592,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=128,
    ):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = BridgeTowerTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = BridgeTowerImageModelTester(parent, **vision_kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        self.contrastive_hidden_size = contrastive_hidden_size
        self.logit_scale_init_value = logit_scale_init_value

        self.batch_size = 1
        self.expected_num_hidden_layers = 8
        self.is_training = False

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, pixel_mask = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return (config, input_ids, attention_mask, pixel_values, pixel_mask)

    def get_config(self):
        return BridgeTowerConfig.from_text_vision_configs(
            text_config=self.text_model_tester.get_config(),
            vision_config=self.vision_model_tester.get_config(),
            share_cross_modal_transformer_layers=self.share_cross_modal_transformer_layers,
            share_link_tower_layers=self.share_link_tower_layers,
            link_tower_type=self.link_tower_type,
            init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder,
            contrastive_hidden_size=self.contrastive_hidden_size,
            logit_scale_init_value=self.logit_scale_init_value,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
        pixel_values,
        pixel_mask,
    ):
        model = BridgeTowerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)
        self.parent.assertEqual(
            result["text_features"].shape,
            (self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size),
        )
        self.parent.assertEqual(
            result["image_features"].shape,
            (self.batch_size, self.vision_model_tester.num_image_features, self.vision_model_tester.hidden_size),
        )
        self.parent.assertEqual(
            result["pooler_output"].shape,
            (self.batch_size, self.text_model_tester.hidden_size + self.vision_model_tester.hidden_size),
        )

    def create_and_check_for_image_and_text_retrieval(
        self,
        config,
        input_ids,
        attention_mask,
        pixel_values,
        pixel_mask,
    ):
        bridgetower_itm_output_last_dimension = 2

        model = BridgeTowerForImageAndTextRetrieval(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, bridgetower_itm_output_last_dimension))

    def create_and_check_for_masked_language_modeling(
        self,
        config,
        input_ids,
        attention_mask,
        pixel_values,
        pixel_mask,
    ):
        model = BridgeTowerForMaskedLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values)

        self.parent.assertEqual(
            result.logits.shape,
            (self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, attention_mask, pixel_values, pixel_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "pixel_mask": pixel_mask,
        }
        return config, inputs_dict


@require_torch
class BridgeTowerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            BridgeTowerModel,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerForContrastiveLearning,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": BridgeTowerModel} if is_torch_available() else {}

    is_training = False
    test_headmasking = False
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    has_attentions = False

    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
    def test_cpu_offload(self):
        pass

    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass

    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
    def test_model_parallelism(self):
        pass

    # function to extract meaningful tensor from output per different model_class
    def extract_output(self, outputs, model_class):
        return outputs["pooler_output"] if model_class == "BridgeTowerModel" else outputs["logits"]

    def setUp(self):
        self.model_tester = BridgeTowerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_and_text_retrieval(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_and_text_retrieval(*config_and_inputs)

    def test_for_masked_language_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_language_modeling(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BridgeTowerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    def test_save_load_fast_init_from_base(self):
        # Override as it is a slow test on this model
        super().test_save_load_fast_init_from_base()

    # Override as extracting meaningful tensor from output is different for BridgeTower
    def test_save_load(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**input_dict)

            out_2 = self.extract_output(outputs, model_class.__name__)
            out_2 = out_2.cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    after_outputs = model(**input_dict)

                # Make sure we don't have nans
                out_1 = self.extract_output(after_outputs, model_class.__name__)
                out_1 = out_1.cpu().numpy()
                out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    # Override this as `hidden states output` is different for BridgeTower
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states_text, hidden_states_vision, hidden_states_cross = (
                outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            )

            expected_num_layers = self.model_tester.expected_num_hidden_layers
            self.assertEqual(
                sum((len(hidden_states_text), len(hidden_states_vision), len(hidden_states_cross))),
                expected_num_layers,
            )

            seq_length = self.model_tester.text_model_tester.seq_length
            num_image_features = self.model_tester.vision_model_tester.num_image_features

            self.assertListEqual(
                list(hidden_states_text[0].shape[-2:]),
                [seq_length, self.model_tester.text_model_tester.hidden_size],
            )
            self.assertListEqual(
                list(hidden_states_vision[0].shape),
                [num_image_features, 1, self.model_tester.vision_model_tester.hidden_size],
            )
            self.assertListEqual(
                list(hidden_states_cross[0][0].shape[-2:]),
                [seq_length, self.model_tester.text_model_tester.hidden_size],
            )
            self.assertListEqual(
                list(hidden_states_cross[0][1].shape[-2:]),
                [num_image_features, self.model_tester.vision_model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    # Override as `hidden states output` is different for BridgeTower
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
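        # Request hidden states (and attentions when the model exposes them) so gradients can be retained on them below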
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0][0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0][0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    # Override as the `logit_scale` parameter initialization is different for BridgeTower
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            config.logit_scale_init_value,
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    @unittest.skip(reason="""BridgeTower does not have input/output embeddings. So this test is not applicable.""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""BridgeTower does not have input/output embeddings. Thus this test is not applicable.""")
    def test_inputs_embeds(self):
        pass


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BridgeTowerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_processor(self):
        return (
            BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
            if is_vision_available()
            else None
        )

    @slow
    def test_image_and_text_retrieval(self):
        model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm").to(
            torch_device
        )
        model.eval()
        processor = self.default_processor
        image = prepare_img()
        text = "a bunch of cats laying on a tower."
        inputs = processor(image, text, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 2])
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(outputs.logits[0, 1].item() > outputs.logits[0, 0].item())

        # verify loss
        inputs["labels"] = torch.ones(1, dtype=torch.long, device=torch_device)
        inputs = inputs.to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)

        self.assertAlmostEqual(outputs.loss.item(), 0.5108, places=4)

    @slow
    def test_masked_language_modeling(self):
        model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm").to(torch_device)
        model.eval()
        processor = self.default_processor
        image = prepare_img()
        text = "a bunch of <mask> laying on a tower."
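        # The processor tokenizes the masked prompt and preprocesses the image into model-ready tensors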
        inputs = processor(image, text, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 11, 50265])
        self.assertEqual(outputs.logits.shape, expected_shape)

        # verify predicted word
        predicted_id = outputs.logits.argmax(dim=-1).squeeze(0).tolist()[4]
        self.assertTrue(processor.decode([predicted_id]) == " cats")

        # verify loss
        inputs["labels"] = inputs["input_ids"].clone()
        inputs = inputs.to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)

        self.assertAlmostEqual(outputs.loss.item(), 5.7373, places=4)

    @slow
    def test_contrastive_learning(self):
        model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc").to(
            torch_device
        )
        model.eval()
        processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
        image = prepare_img()
        text = "a bunch of cats laying on a tower."
        inputs = processor(image, text, padding=True, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, return_loss=True)

        # verify the logits
        expected_shape = torch.Size([1, 3, 512])
        self.assertEqual(outputs.logits.shape, expected_shape)


@slow
@require_torch
class BridgeTowerModelTrainingTest(unittest.TestCase):
    all_training_supported_model_classes = (
        (BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLearning)
        if is_torch_available()
        else ()
    )

    def setUp(self):
        self.model_tester = BridgeTowerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99)

    def _prepare_inputs_for_training(self, model_class):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if model_class == BridgeTowerForMaskedLM:
            inputs_dict["labels"] = inputs_dict["input_ids"]
        elif model_class == BridgeTowerForImageAndTextRetrieval:
            inputs_dict["labels"] = ids_tensor([1], 2)
        elif model_class == BridgeTowerForContrastiveLearning:
            inputs_dict["return_loss"] = True
        return config, inputs_dict

    def _get_non_used_layer_names(self, model_class):
        non_used_layer_names = ["text_model.pooler"]
        if model_class == BridgeTowerForMaskedLM:
            non_used_layer_names = non_used_layer_names + [
                # The index `1` is the last layer of `cross_modal_image_layers` (number of layers minus 1)
                "cross_modal_image_layers.1",
                "cross_modal_image_pooler",
                "cross_modal_text_pooler",
            ]
        return non_used_layer_names

    def _is_layer_used(self, model_class, layer_name):
        non_used_layer_names = self._get_non_used_layer_names(model_class)
        for non_used_layer_name in non_used_layer_names:
            if non_used_layer_name in layer_name:
                return False
        return True

    def test_training(self):
        for model_class in self.all_training_supported_model_classes:
            config, inputs_dict = self._prepare_inputs_for_training(model_class)
            model = model_class(config)
            model.to(torch_device)
            model.train()

            loss = model(**inputs_dict).loss
            loss.backward()

            # verify that the gradients of the used layers' weights are not None
            for name, param in model.named_parameters():
                if self._is_layer_used(model_class, name):
                    self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}")
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bark/test_modeling_bark.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Bark model. """ import copy import inspect import tempfile import unittest from transformers import ( BarkCoarseConfig, BarkFineConfig, BarkSemanticConfig, is_torch_available, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( BarkCausalModel, BarkCoarseModel, BarkFineModel, BarkModel, BarkProcessor, BarkSemanticModel, ) class BarkSemanticModelTester: def __init__( self, parent, batch_size=2, seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel config_class=None, model_class=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False self.config_class = config_class self.model_class = model_class def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return self.config_class( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, 
) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = self.model_class(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkCoarseModelTester: def __init__( self, parent, batch_size=2, seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel config_class=None, model_class=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False self.config_class = config_class self.model_class = model_class def 
prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return self.config_class( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = self.model_class(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkFineModelTester: def __init__( self, parent, batch_size=2, seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel config_class=None, model_class=None, ): self.parent = parent 
self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False self.config_class = config_class self.model_class = model_class def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) # randint between self.n_codes_given - 1 and self.n_codes_total - 1 codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given inputs_dict = { "codebook_idx": codebook_idx, "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return self.config_class( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = self.model_class(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = 
outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) @require_torch class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BarkSemanticModel,) if is_torch_available() else () all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkSemanticModelTester( self, config_class=BarkSemanticConfig, model_class=BarkSemanticModel ) self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): # Same tester as BarkSemanticModelTest, except for model_class and config_class all_model_classes = (BarkCoarseModel,) if is_torch_available() else () all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkCoarseModelTester(self, config_class=BarkCoarseConfig, model_class=BarkCoarseModel) self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def 
test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkFineModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BarkFineModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False # no model_parallel for now test_model_parallel = False # torchscript disabled for now because forward with an int test_torchscript = False test_resize_embeddings = True def setUp(self): self.model_tester = BarkFineModelTester(self, config_class=BarkFineConfig, model_class=BarkFineModel) self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]] inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]]) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] # take first codebook channel model = self.all_model_classes[0](config).eval().to(torch_device) if torch_device == "cuda": model.half() # toy generation_configs semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0) coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given) 
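        # Toy fine-acoustics config: history prompt capped at half the block size, full block used as the maximum input length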
fine_generation_config = BarkFineGenerationConfig( max_fine_history_length=config.block_size // 2, max_fine_input_length=config.block_size, n_fine_codebooks=config.n_codes_total, ) codebook_size = config.vocab_size - 1 model.generate( input_ids, history_prompt=None, temperature=None, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) model.generate( input_ids, history_prompt=None, temperature=0.7, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["codebook_idx", "input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model_common_attributes(self): # one embedding layer per codebook config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding)) model.set_input_embeddings( torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)]) ) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear)) def test_resize_tokens_embeddings(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed_list = model.resize_token_embeddings(model_vocab_size) cloned_embeddings_list = [model_embed.weight.clone() for model_embed in model_embed_list] # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix for each codebook for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary 
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. # only check for the first embedding matrix models_equal = True for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class BarkModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return BarkModel.from_pretrained("ylacombe/bark-large").to(torch_device) @cached_property def processor(self): return BarkProcessor.from_pretrained("ylacombe/bark-large") @cached_property def inputs(self): input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6") input_ids = input_ids.to(torch_device) return input_ids @cached_property def semantic_generation_config(self): semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config) return semantic_generation_config @cached_property def coarse_generation_config(self): coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config) return coarse_generation_config @cached_property def fine_generation_config(self): fine_generation_config = 
BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config) return fine_generation_config @slow def test_generate_semantic(self): input_ids = self.inputs # fmt: off # check first ids expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: on # greedy decoding with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, semantic_generation_config=self.semantic_generation_config, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_coarse(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # fmt: off # check first ids expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: on with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_fine(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # fmt: off expected_output_ids = [ [1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,], [367, 394, 596, 342, 504, 492, 27, 27, 822, 822,], [961, 955, 221, 955, 955, 686, 939, 939, 479, 176,], [638, 365, 218, 944, 853, 363, 639, 22, 884, 456,], [302, 912, 524, 38, 174, 209, 879, 23, 910, 227,], [440, 673, 861, 666, 372, 558, 49, 172, 232, 342,], [244, 358, 123, 356, 586, 520, 499, 877, 542, 637,], [806, 685, 905, 848, 803, 810, 921, 208, 625, 203,], ] # fmt: on with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) # greedy decoding output_ids = self.model.fine_acoustics.generate( output_ids, history_prompt=history_prompt, temperature=None, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, fine_generation_config=self.fine_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids) @slow def test_generate_end_to_end(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids) self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"}) @slow def test_generate_end_to_end_with_args(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6) self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4) @slow def test_generate_end_to_end_with_sub_models_args(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids, do_sample=False, coarse_do_sample=True, coarse_temperature=0.7) self.model.generate( 
**input_ids, do_sample=False, coarse_do_sample=True, coarse_temperature=0.7, fine_temperature=0.3 ) self.model.generate( **input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6, semantic_temperature=0.9, coarse_temperature=0.2, fine_temperature=0.1, )
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bark/test_processor_bark.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..speech_to_text.test_modeling_speech_to_text import Speech2TextModelTester from ..speech_to_text_2.test_modeling_speech_to_text_2 import Speech2Text2StandaloneDecoderModelTester from ..wav2vec2.test_modeling_wav2vec2 import Wav2Vec2ModelTester if is_torch_available(): import numpy as np import torch from transformers import ( BertLMHeadModel, Speech2Text2ForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Model, ) from transformers.modeling_outputs import BaseModelOutput from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextEncoder @require_torch class EncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_encoder_decoder_model_from_pretrained_configs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = SpeechEncoderDecoderModel(encoder_decoder_config) enc_dec_model.to(torch_device) enc_dec_model.eval() self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = 
BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_with_inputs( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): inputs = input_values if input_features is None else input_features encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) outputs_encoder_decoder_kwarg = enc_dec_model( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder_kwarg["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = SpeechEncoderDecoderModel.from_pretrained(tmpdirname) enc_dec_model.to(torch_device) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - 
out_2)) self.assertLessEqual(max_diff, 1e-5) def check_save_and_load_encoder_decoder_model( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, input_values=None, input_features=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname: enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname) enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname) SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=encoder_tmp_dirname, decoder_pretrained_model_name_or_path=decoder_tmp_dirname, ) after_outputs = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, attention_mask, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, input_values=None, input_features=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( input_values=input_values, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) inputs = input_values if input_features is None else input_features encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model.encoder._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate( self, config, decoder_config, 
input_values=None, input_features=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) # make sure EOS token is set to None to prevent early stopping of generation if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None inputs = input_values if input_features is None else input_features # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( inputs, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(generated_output.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_with_inputs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_with_inputs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def test_training_gradient_checkpointing(self): inputs_dict = self.prepare_config_and_inputs() encoder_model, decoder_model = self.get_encoder_decoder_model( inputs_dict["config"], inputs_dict["decoder_config"] ) model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 model.config.pad_token_id = 0 model_inputs = { "attention_mask": inputs_dict["attention_mask"], "labels": inputs_dict["labels"], "decoder_input_ids": inputs_dict["decoder_input_ids"], } inputs = inputs_dict["input_features"] if "input_features" in inputs_dict else inputs_dict["input_values"] loss = model(inputs, **model_inputs).loss loss.backward() @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 
= SpeechEncoderDecoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class Wav2Vec2BertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-base-960h", "bert-base-cased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_values": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) wav2vec2_model_tester = Wav2Vec2ModelTester(self) encoder_config_and_inputs = wav2vec2_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() ( config, input_values, input_mask, ) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/s2t-small-librispeech-asr", "bert-base-cased" ) batch_size = 13 input_features = floats_tensor([batch_size, 7, 80], scale=1.0) attention_mask = random_attention_mask([batch_size, 7]) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "input_features": input_features, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Speech2TextEncoder(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) speech2text_model_tester = Speech2TextModelTester(self) encoder_config_and_inputs = speech2text_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() config, inputs = encoder_config_and_inputs input_features 
= inputs["input_features"] input_mask = inputs["attention_mask"] ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "input_features": input_features, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } # can't save full model for now because Speech2TextModel != Speech2TextEncoder def test_encoder_decoder_model_from_pretrained_configs(self): pass # can't save full model for now because Speech2TextModel != Speech2TextEncoder def test_save_and_load_from_pretrained(self): pass # all published pretrained models are Speech2TextModel != Speech2TextEncoder def test_real_model_save_load_from_pretrained(self): pass @require_torch class Wav2Vec2Speech2Text2(EncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = Wav2Vec2Model(config).eval() decoder_model = Speech2Text2ForCausalLM(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = Wav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = Speech2Text2StandaloneDecoderModelTester( self, batch_size=13, d_model=32, max_position_embeddings=512 ) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() ( config, input_values, input_mask, ) = encoder_config_and_inputs (decoder_config, decoder_input_ids, decoder_attention_mask, _) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "input_values": input_values, "attention_mask": input_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": decoder_input_ids, } # there are no published pretrained Speech2Text2ForCausalLM for now def test_real_model_save_load_from_pretrained(self): pass
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow, torch_device from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bart.test_modeling_flax_bart import FlaxBartStandaloneDecoderModelTester from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester from ..wav2vec2.test_modeling_flax_wav2vec2 import FlaxWav2Vec2ModelTester if is_flax_available(): import jax import jax.numpy as jnp from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import ( FlaxBartForCausalLM, FlaxBertForCausalLM, FlaxGPT2LMHeadModel, FlaxSpeechEncoderDecoderModel, FlaxWav2Vec2Model, SpeechEncoderDecoderConfig, ) from transformers.modeling_flax_outputs import FlaxBaseModelOutput from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import SpeechEncoderDecoderModel @require_flax class FlaxEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, 
decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = FlaxBaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( attention_mask, decoder_input_ids, decoder_attention_mask, encoder_outputs=encoder_outputs ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 4e-2) def check_encoder_decoder_model_from_encoder_decoder_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) # assert that loading encoder and decoder models from configs has been correctly executed self.assertEqual(config.add_adapter, encoder_model.config.add_adapter) self.assertEqual(decoder_config.use_cache, decoder_model.config.use_cache) with tempfile.TemporaryDirectory() as enc_tmpdir: with tempfile.TemporaryDirectory() as dec_tmpdir: encoder_model.save_pretrained(enc_tmpdir) decoder_model.save_pretrained(dec_tmpdir) # load a model from pretrained encoder and decoder checkpoints, setting one encoder and one decoder kwarg opposite to that specified in their respective configs enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=enc_tmpdir, decoder_pretrained_model_name_or_path=dec_tmpdir, encoder_add_adapter=not config.add_adapter, decoder_use_cache=not decoder_config.use_cache, ) # assert 
that setting encoder and decoder kwargs opposite to those in the configs has correctly been applied self.assertNotEqual(config.add_adapter, enc_dec_model.config.encoder.add_adapter) self.assertNotEqual(decoder_config.use_cache, enc_dec_model.config.decoder.use_cache) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_output_attentions( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate(self, inputs, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) pad_token_id = enc_dec_model.config.decoder.pad_token_id eos_token_id = enc_dec_model.config.decoder.eos_token_id decoder_start_token_id = enc_dec_model.config.decoder.decoder_start_token_id # Copied from generation.utils (GPT2 doesn't have `pad_token_id`) if pad_token_id is None and eos_token_id is not None: pad_token_id = eos_token_id if decoder_start_token_id is None: decoder_start_token_id = enc_dec_model.config.decoder.bos_token_id # Bert does not have a bos token id, so use pad_token_id instead # Copied from `test_modeling_encoder_decoder.py` if decoder_start_token_id is None: decoder_start_token_id = pad_token_id generated_output = enc_dec_model.generate( inputs, pad_token_id=pad_token_id, eos_token_id=eos_token_id, 
decoder_start_token_id=decoder_start_token_id, ) generated_sequences = generated_output.sequences self.assertEqual(generated_sequences.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def check_freeze_feature_encoder( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) params = enc_dec_model.params def cross_entropy(logits, labels): return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1) # define a dummy loss function for computing the loss over a forward pass def compute_loss( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder: bool = False, ): outputs_enc_dec = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, freeze_feature_encoder=freeze_feature_encoder, params=params, ) logits = outputs_enc_dec.logits vocab_size = logits.shape[-1] loss = cross_entropy(logits, onehot(labels=decoder_input_ids, num_classes=vocab_size)).sum() return (loss, logits) # transform the loss function to get the gradients grad_fn = jax.value_and_grad(compute_loss, has_aux=True) # compute the loss, logits, and gradients for the unfrozen model (loss, logits), grads = grad_fn( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder=False ) # compare to the loss, logits and gradients for the frozen model (loss_frozen, logits_frozen), grads_frozen = grad_fn( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder=True ) # ensure that the logits and losses remain precisely equal self.assertTrue((logits == logits_frozen).all()) self.assertEqual(loss, loss_frozen) grads = flatten_dict(grads) grads_frozen = flatten_dict(grads_frozen) # ensure that the dicts of gradients contain the same keys self.assertEqual(grads.keys(), grads_frozen.keys()) # ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k) feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k) for feature_extractor_grad, feature_extractor_grad_frozen in zip( feature_extractor_grads, feature_extractor_grads_frozen ): self.assertTrue((feature_extractor_grad_frozen == 0.0).all()) self.assertTrue((feature_extractor_grad > 0.0).any()) # ensure that the gradients of all unfrozen layers remain precisely equal, i.e. 
all layers excluding the frozen 'feature_extractor' grads = tuple(grads[k] for k in grads if "feature_extractor" not in k) grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k) for grad, grad_frozen in zip(grads, grads_frozen): self.assertTrue((grad == grad_frozen).all()) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() # prepare inputs flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 1e-5) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 1e-5) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 1e-5) def check_equivalence_pt_to_flax(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() 
self.check_save_and_load(**input_ids_dict) def test_encoder_decoder_model_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_encoder_decoder_pretrained(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_freeze_feature_encoder(self): input_ids_dict = self.prepare_config_and_inputs() self.check_freeze_feature_encoder(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del inputs_dict["encoder_hidden_states"] # Avoid the case where a sequence has no place to attend (after combined with the causal attention mask) batch_size = inputs_dict["decoder_attention_mask"].shape[0] inputs_dict["decoder_attention_mask"] = np.concatenate( [np.ones(shape=(batch_size, 1)), inputs_dict["decoder_attention_mask"][:, 1:]], axis=1 ) # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. decoder_config.use_cache = False self.assertTrue(decoder_config.cross_attention_hidden_size is None) # check without `enc_to_dec_proj` projection decoder_config.hidden_size = config.hidden_size self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) # check `add_adapter` works as expected config.add_adapter = True self.assertTrue(config.add_adapter) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() inputs = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) attention_mask = ids_tensor([13, 5], vocab_size=2) outputs = model_2( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxSpeechEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 4e-2) @require_flax class 
FlaxWav2Vec2GPT2ModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "gpt2-medium" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxGPT2LMHeadModel(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxGPT2ModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2gpt2_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("jsnfly/wav2vec2-large-xlsr-53-german-gpt2") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "jsnfly/wav2vec2-large-xlsr-53-german-gpt2", from_pt=True ) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT 
with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BartModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "bart-large" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBartForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBartStandaloneDecoderModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bart_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "patrickvonplaten/wav2vec2-2-bart-large", from_pt=True ) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between 
Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BertModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "bert-large-uncased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], model.config.encoder.vocab_size) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBertForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBertModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bert_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large", from_pt=True) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], fx_model.config.encoder.vocab_size) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) 
decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2)
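# ---------------------------------------------------------------------------------------
# Illustrative sketch, not part of the test classes above: the PT -> Flax weight-porting
# step that the equivalence checks in this file build on, shown outside the test harness.
# The tiny config values are arbitrary assumptions for speed, and the 4e-2 tolerance simply
# mirrors the one used by the slow tests above.
import numpy as np
import torch

from transformers import (
    BertConfig,
    FlaxSpeechEncoderDecoderModel,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
)
from transformers.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

encoder_config = Wav2Vec2Config(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
decoder_config = BertConfig(
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
    is_decoder=True,
    add_cross_attention=True,
)
config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
config.decoder.use_cache = False  # Flax forward does not return a cache by default

pt_model = SpeechEncoderDecoderModel(config).eval()
fx_model = FlaxSpeechEncoderDecoderModel(config)
fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)

inputs = np.random.randn(2, 8000).astype(np.float32)
decoder_input_ids = np.zeros((2, 4), dtype=np.int32)

with torch.no_grad():
    pt_logits = pt_model(
        input_values=torch.from_numpy(inputs),
        decoder_input_ids=torch.from_numpy(decoder_input_ids).long(),
    ).logits
fx_logits = fx_model(inputs=inputs, decoder_input_ids=decoder_input_ids).logits

assert np.amax(np.abs(np.asarray(fx_logits) - pt_logits.numpy())) < 4e-2
# ---------------------------------------------------------------------------------------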
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/imagegpt/test_image_processing_imagegpt.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize def prepare_image_processor_dict(self): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ImageGPTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = ImageGPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "clusters")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_image_processor_to_json_string(self): image_processor = self.image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: self.assertEqual(obj[key], value) def test_image_processor_to_json_file(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: 
json_file_path = os.path.join(tmpdirname, "image_processor.json") image_processor_first.to_json_file(json_file_path) image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict() image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) def test_image_processor_from_and_save_pretrained(self): image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(tmpdirname) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict() image_processor_first = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(value, image_processor_second[key])) else: self.assertEqual(image_processor_first[key], value) @unittest.skip("ImageGPT requires clusters at initialization") def test_init_without_params(self): pass def prepare_images(): dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test") image1 = Image.open(dataset[4]["file"]) image2 = Image.open(dataset[5]["file"]) images = [image1, image2] return images @require_vision @require_torch class ImageGPTImageProcessorIntegrationTest(unittest.TestCase): @slow def test_image(self): image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") images = prepare_images() # test non-batched encoding = image_processing(images[0], return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (1, 1024)) expected_slice = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice) # test batched encoding = image_processing(images, return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (2, 1024)) expected_slice = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
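# ---------------------------------------------------------------------------------------
# Illustrative sketch, not part of the test classes above: ImageGPT has no patch embedding,
# so the image processor itself turns every pixel into a token id by snapping its normalized
# RGB value to the nearest row of the `clusters` palette. The two-colour palette and the
# 4x4 target size below are arbitrary assumptions for illustration only.
import numpy as np
from PIL import Image

from transformers import ImageGPTImageProcessor

clusters = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])  # a "dark" bin and a "bright" bin
image_processor = ImageGPTImageProcessor(
    clusters=clusters, do_resize=True, size={"height": 4, "width": 4}, do_normalize=True
)

image = Image.fromarray(np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8))
encoding = image_processor(image, return_tensors="np")

# one token id per pixel: 4 * 4 = 16 ids, each pointing at one of the two palette entries
assert encoding.input_ids.shape == (1, 16)
assert set(np.unique(encoding.input_ids)).issubset({0, 1})
# ---------------------------------------------------------------------------------------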
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/imagegpt/test_modeling_imagegpt.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import tempfile import unittest from transformers import ImageGPTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ) if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class ImageGPTModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None def get_large_model_config(self): return ImageGPTConfig.from_pretrained("imagegpt") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): pixel_values = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = 
None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return ImageGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 513 config.max_position_embeddings = 1024 return config def prepare_config_and_inputs_for_decoder(self): ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, pixel_values, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_imagegpt_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values, token_type_ids=token_type_ids, head_mask=head_mask) result = model(pixel_values, token_type_ids=token_type_ids) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, pixel_values, input_mask, head_mask, token_type_ids, *args): model = ImageGPTForCausalImageModeling(config) model.to(torch_device) model.eval() labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) result = model(pixel_values, token_type_ids=token_type_ids, labels=labels) self.parent.assertEqual(result.loss.shape, ()) # ImageGPTForCausalImageModeling doens't have tied input- and output embeddings self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1)) def create_and_check_imagegpt_for_image_classification( self, config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = ImageGPTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, attention_mask=input_mask, 
token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel) if is_torch_available() else () ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} if is_torch_available() else {} ) test_missing_keys = False input_name = "pixel_values" # as ImageGPTForImageClassification isn't included in any auto mapping, we add labels here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ImageGPTForImageClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict # we overwrite the _check_scores method of GenerationTesterMixin, as ImageGPTForCausalImageModeling doesn't have tied input- and output embeddings def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size - 1) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) def test_imagegpt_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_imagegpt_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ImageGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in 
self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untie embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be 
resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["pixel_values"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pixel_values = inputs["pixel_values"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(pixel_values) with torch.no_grad(): model(**inputs)[0] def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: pixel_values = inputs["pixel_values"] traced_model = torch.jit.trace(model, pixel_values) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ImageGPTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") if 
is_vision_available() else None @slow def test_inference_causal_lm_head(self): model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1024, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
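# Illustrative sketch (not part of the test suite): a minimal standalone run that mirrors
# test_inference_causal_lm_head above. It assumes the "openai/imagegpt-small" checkpoint can be
# downloaded and that the local COCO fixture used by prepare_img() is available; the expected
# logits shape (1, 1024, 512) is taken directly from the test.
from PIL import Image
import torch
from transformers import ImageGPTImageProcessor, ImageGPTForCausalImageModeling

image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.logits.shape)  # torch.Size([1, 1024, 512]) for imagegpt-small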
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/altclip/test_modeling_altclip.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch AltCLIP model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import AltCLIPConfig, AltCLIPProcessor, AltCLIPTextConfig, AltCLIPVisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn as nn from transformers import AltCLIPModel, AltCLIPTextModel, AltCLIPVisionModel from transformers.models.altclip.modeling_altclip import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class AltCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return AltCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = AltCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // 
patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class AltCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (AltCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = AltCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=AltCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="AltCLIPVisionModel uses the same vision backbone as the CLIP model.") def test_model_from_pretrained(self): pass class AltCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, project_dim=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.project_dim = project_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings 
= max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return AltCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, project_dim=self.project_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, pad_token_id=1, ) def create_and_check_model(self, config, input_ids, input_mask): model = AltCLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AltCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPTextModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_head_masking = False def setUp(self): self.model_tester = AltCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AltCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass def test_model_outputs_equivalence(self): pass @unittest.skip(reason="Result of the model is a dict") def test_hidden_states_output(self): pass @unittest.skip(reason="AltCLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class AltCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = AltCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = 
AltCLIPVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return AltCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = AltCLIPModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): model(input_ids, pixel_values, attention_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_torch class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AltCLIPModel} if is_torch_available() else {} fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = AltCLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initialization is different for AltCLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return 
configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_torch class AltCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "BAAI/AltCLIP" model = AltCLIPModel.from_pretrained(model_name).to(torch_device) processor = AltCLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) probs = outputs.logits_per_image.softmax(dim=1) expected_probs = torch.tensor([[9.9942e-01, 5.7805e-04]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
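# Illustrative sketch (not part of the test suite): standalone zero-shot image-text matching with
# the "BAAI/AltCLIP" checkpoint, mirroring AltCLIPModelIntegrationTest.test_inference above. The
# COCO image URL and the Chinese prompts ("a photo of a cat" / "a photo of a dog") come straight
# from the test; the printed probabilities should roughly match the expected [[0.9994, 0.0006]].
import requests
import torch
from PIL import Image
from transformers import AltCLIPModel, AltCLIPProcessor

model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# image-to-text similarity scores, normalized over the two candidate captions
probs = outputs.logits_per_image.softmax(dim=1)
print(probs)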
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/nystromformer/test_modeling_nystromformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Nystromformer model. """ import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class NystromformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): 
return NystromformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NystromformerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NystromformerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NystromformerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NystromformerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NystromformerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = NystromformerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, 
attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = NystromformerModelTester(self) self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = NystromformerModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class NystromformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, 
-0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_masked_lm_end_to_end(self): sentence = "the [MASK] of Belgium is Brussels" tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512") model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512") encoding = tokenizer(sentence, return_tensors="pt") with torch.no_grad(): token_logits = model(encoding.input_ids).logits prediction = token_logits[:, 2, :].argmax(-1)[0] self.assertEqual(tokenizer.decode(prediction), "capital")
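# Illustrative sketch (not part of the test suite): end-to-end masked-token prediction with the
# "uw-madison/nystromformer-512" checkpoint, mirroring test_masked_lm_end_to_end above. The test
# indexes position 2, which corresponds to the [MASK] token in this sentence, and expects "capital".
import torch
from transformers import AutoTokenizer, NystromformerForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    token_logits = model(encoding.input_ids).logits

prediction = token_logits[:, 2, :].argmax(-1)[0]
print(tokenizer.decode(prediction))  # expected: "capital"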
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speecht5/test_tokenization_speecht5.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 tokenizers.""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = SpeechT5Tokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) mask_token = AddedToken("<mask>", lstrip=True, rstrip=False) tokenizer.mask_token = mask_token tokenizer.add_special_tokens({"mask_token": mask_token}) tokenizer.add_tokens(["<ctc_blank>"]) tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<pad>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-4], "œ") self.assertEqual(vocab_keys[-2], "<mask>") self.assertEqual(vocab_keys[-1], "<ctc_blank>") self.assertEqual(len(vocab_keys), 81) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 79) def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + 
len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-3], tokenizer.vocab_size - 1) self.assertGreater(tokens[-3], tokens[-4]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-3], tokenizer.pad_token_id) def test_pickle_subword_regularization_tokenizer(self): pass def test_subword_regularization_tokenizer(self): pass def test_full_tokenizer(self): tokenizer = self.get_tokenizer() tokens = tokenizer.tokenize("This is a test") # fmt: off self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't']) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, # fmt: off [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] # fmt: on ) ids = tokenizer.convert_tokens_to_ids(tokens) # fmt: off self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: on back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, # fmt: off [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] # fmt: on ) @slow def test_tokenizer_integration(self): # Use custom sequence because this tokenizer does not handle numbers. sequences = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off expected_encoding = { 'input_ids': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences, )
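# Illustrative sketch (not part of the test suite): character-level tokenization with the released
# SpeechT5 tokenizer. The tests above build the tokenizer from a local SentencePiece fixture;
# loading the "microsoft/speecht5_asr" checkpoint (the one exercised by test_tokenizer_integration)
# via from_pretrained is an assumption and requires access to the Hugging Face Hub.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")

tokens = tokenizer.tokenize("this is a test")          # character-level pieces with SPIECE_UNDERLINE
ids = tokenizer.convert_tokens_to_ids(tokens)          # integer ids for each piece
text = tokenizer.decode(tokenizer.encode("this is a test", add_special_tokens=False))
print(tokens, ids, text)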
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py
# coding=utf-8 # Copyright 2021-2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 feature extractors.""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechT5FeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class SpeechT5FeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.return_attention_mask = return_attention_mask def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs def prepare_inputs_for_target(self, equal_length=False, numpify=False): if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.num_mel_bins)) for x in 
range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = SpeechT5FeatureExtractor def setUp(self): self.feat_extract_tester = SpeechT5FeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test not batched input encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_zero_mean_unit_variance_normalization_np(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np") input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) lengths = range(800, 1400, 200) speech_inputs = [floats_list((1, x))[0] for x in lengths] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, max_length=max_length, padding=padding) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) 
self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def test_call_target(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values self.assertTrue(input_values.ndim == 3) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. 
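# three equal-length waveforms stacked into a single (3, 800) numpy array should be treated as a batch and produce the same features as the list-of-arrays input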
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batch_feature_target(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_batch_feature_target_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_padding_accepts_tensors_target_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins # hack! input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lenghts = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins # hack! 
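# the generic pad() helper uses feature_size to decide whether each example carries a trailing feature dimension, so for 2-D spectrogram targets it is temporarily set to num_mel_bins (hence the "hack" above)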
processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lenghts) def test_attention_mask_with_truncation_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lenghts = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lenghts) feat_extract.feature_size = feat_extract.num_mel_bins # hack! processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] ) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(input_speech, return_tensors="pt").input_values self.assertEquals(input_values.shape, (1, 93680)) self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6)) def test_integration_target(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values self.assertEquals(input_values.shape, (1, 366, 80)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
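# ---------------------------------------------------------------------------
# Minimal usage sketch (not a test and never invoked by the suite): the two
# code paths exercised above, called directly. The random waveform is only a
# stand-in for a real 16 kHz mono signal.
# ---------------------------------------------------------------------------
def _example_feature_extraction_usage():
    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # ~1 second of fake audio
    # waveform -> input_values for the speech encoder
    encoder_inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
    # waveform -> (1, frames, num_mel_bins) log-mel targets for the speech decoder
    decoder_targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    return encoder_inputs.input_values.shape, decoder_targets.input_values.shape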
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speecht5/test_modeling_speecht5.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SpeechT5 model. """ import copy import inspect import tempfile import unittest from transformers import SpeechT5Config, SpeechT5HifiGanConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5Processor, ) def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, decoder_input_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} if decoder_input_ids is not None: decoder_dict = {"decoder_input_ids": decoder_input_ids} else: decoder_dict = {"decoder_input_values": decoder_input_values} if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { **encoder_dict, **decoder_dict, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class SpeechT5ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, vocab_size=81, hidden_size=24, num_hidden_layers=4, num_attention_heads=2, intermediate_size=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = 
self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5Model(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) @require_torch class SpeechT5ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5Model,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": SpeechT5ForSpeechToText, "feature-extraction": SpeechT5Model} if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass @require_torch class SpeechT5ForSpeechToTextTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, # speech is longer decoder_seq_length=7, is_training=False, 
hidden_size=24, num_hidden_layers=4, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size).clamp(2) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_ids = inputs_dict["decoder_input_ids"] result = model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.decoder_seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, 
attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) @require_torch class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToTextTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if 
config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( 
list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass def test_resize_embeddings_untied(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # 
Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_resize_tokens_embeddings(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix.
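# (zip stops at the shorter of the two tensors, so only the rows that survived both resizes are compared element by element)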
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForSpeechToTextIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device) generated_ids = model.generate(input_values) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(audio=input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) generated_ids = model.generate(input_values, attention_mask=attention_mask) generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister quilter's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and rosebeaf looming before us" " similars drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick latin's work is really greek after all and can discover in it" " but little of rocky ithica", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS) @require_torch class SpeechT5ForTextToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=7, decoder_seq_length=1024, # speech is longer is_training=False, hidden_size=24, num_hidden_layers=4, num_attention_heads=2, intermediate_size=4, vocab_size=81, 
num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_ids=input_ids, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_ids, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_ids" def setUp(self): self.model_tester = SpeechT5ForTextToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = 
model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_decoder_model_past_with_large_inputs(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_determinism(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_model_outputs_equivalence(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_save_load(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForTextToSpeechIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") def test_generation(self): model = 
SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") model.to(torch_device) processor = self.default_processor set_seed(555) # make deterministic input_text = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" input_ids = processor(text=input_text, return_tensors="pt").input_ids.to(torch_device) generated_speech = model.generate_speech(input_ids) self.assertEqual(generated_speech.shape, (1820, model.config.num_mel_bins)) @require_torch class SpeechT5ForSpeechToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, # speech is longer decoder_seq_length=1024, is_training=False, hidden_size=24, num_hidden_layers=4, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) 
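# Note: the attention-shape assertions in the test class below never hard-code the subsampled speech length; they query the encoder prenet's _get_feat_extract_output_lengths(), since the three conv layers (kernel 8, stride 4) downsample the 1024-sample inputs substantially.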
def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_decoder_model_past_with_large_inputs(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_determinism(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, 
self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_model_outputs_equivalence(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_save_load(self): pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForSpeechToSpeechIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_vc") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = 
load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device) speaker_embeddings = torch.zeros((1, 512), device=torch_device) generated_speech = model.generate_speech(input_values, speaker_embeddings=speaker_embeddings) self.assertEqual(generated_speech.shape[1], model.config.num_mel_bins) self.assertGreaterEqual(generated_speech.shape[0], 300) self.assertLessEqual(generated_speech.shape[0], 310) class SpeechT5HifiGanTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, num_mel_bins=20, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.num_mel_bins = num_mel_bins def prepare_config_and_inputs(self): input_values = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0) config = self.get_config() return config, input_values def get_config(self): return SpeechT5HifiGanConfig( model_in_dim=self.num_mel_bins, upsample_initial_channel=32, ) def create_and_check_model(self, config, input_values): model = SpeechT5HifiGan(config=config).to(torch_device).eval() result = model(input_values) self.parent.assertEqual(result.shape, (self.seq_length * 256,)) def prepare_config_and_inputs_for_common(self): config, input_values = self.prepare_config_and_inputs() inputs_dict = {"spectrogram": input_values} return config, inputs_dict @require_torch class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5HifiGan,) if is_torch_available() else () test_torchscript = False test_pruning = False test_resize_embeddings = False test_resize_position_embeddings = False test_head_masking = False test_mismatched_shapes = False test_missing_keys = False test_model_parallel = False is_encoder_decoder = False has_attentions = False input_name = "spectrogram" def setUp(self): self.model_tester = SpeechT5HifiGanTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5HifiGanConfig) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "spectrogram", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # this model does not output hidden states def 
test_hidden_states_output(self): pass # skip def test_initialization(self): pass # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass # skip as this model doesn't support all arguments tested def test_model_outputs_equivalence(self): pass # this model does not output hidden states def test_retain_grad_hidden_states_attentions(self): pass # skip because it fails on automapping of SpeechT5HifiGanConfig def test_save_load_fast_init_from_base(self): pass # skip because it fails on automapping of SpeechT5HifiGanConfig def test_save_load_fast_init_to_base(self): pass def test_batched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() batched_inputs = inputs["spectrogram"].unsqueeze(0).repeat(2, 1, 1) with torch.no_grad(): batched_outputs = model(batched_inputs.to(torch_device)) self.assertEqual( batched_inputs.shape[0], batched_outputs.shape[0], msg="Got different batch dims for input and output" ) def test_unbatched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(inputs["spectrogram"].to(torch_device)) self.assertTrue(outputs.dim() == 1, msg="Got un-batched inputs but batched output")
0
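A minimal usage sketch, not part of the test file above: the speech-to-speech integration test only checks the spectrogram shape, and the SpeechT5HifiGan tests exercise the vocoder in isolation. Assuming the public "microsoft/speecht5_vc" and "microsoft/speecht5_hifigan" checkpoints and the vocoder keyword of generate_speech (not shown in the tests above), the two pieces combine roughly like this:

import numpy as np
import torch
from transformers import SpeechT5ForSpeechToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz, stand-in for real speech
inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
speaker_embeddings = torch.zeros((1, 512))  # zero x-vector, as in the integration test above

# Without a vocoder, generate_speech returns a (num_frames, num_mel_bins) spectrogram;
# passing the HifiGan vocoder (assumed keyword) yields the synthesized waveform directly.
speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)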
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/speecht5/test_processor_speecht5.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 processors.""" import json import os import shutil import tempfile import unittest from transformers import is_speech_available, is_torch_available from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_torch from transformers.utils import FEATURE_EXTRACTOR_NAME if is_speech_available() and is_torch_available(): from transformers import SpeechT5FeatureExtractor, SpeechT5Processor from .test_feature_extraction_speecht5 import floats_list SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_torch class SpeechT5ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "do_normalize": False, "num_mel_bins": 80, "hop_length": 16, "win_length": 64, "win_function": "hann_window", "fmin": 80, "fmax": 7600, "mel_floor": 1e-10, "reduction_factor": 2, "return_attention_mask": True, } self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") def get_tokenizer(self, **kwargs): return SpeechT5Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return SpeechT5FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SpeechT5Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SpeechT5Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SpeechT5Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) 
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np") input_processor = processor(audio=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_feature_extractor_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio_target=raw_speech, return_tensors="np") input_processor = processor(audio_target=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text_target=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
0
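A small usage sketch, not part of the file above, of the routing the processor tests rely on: audio goes through the feature extractor, text through the tokenizer, and the *_target variants produce decoder-side targets. The checkpoint name is just one published option and is an assumption here.

import numpy as np
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz

audio_inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")    # feature-extractor path
text_inputs = processor(text="this is a test", return_tensors="pt")                 # tokenizer path
targets = processor(audio_target=speech, sampling_rate=16000, return_tensors="pt")  # target-side spectrogram features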
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/xlm/test_modeling_tf_xlm.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMModel, TFXLMWithLMHeadModel, XLMConfig, ) class TFXLMModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = True self.scope = None self.bos_token_id = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMModel(config=config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMWithLMHeadModel(config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} outputs = model(inputs) result = outputs self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForSequenceClassification(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = TFXLMForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = TFXLMForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": 
input_lengths, } return config, inputs_dict @require_tf class TFXLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple, TFXLMForTokenClassification, TFXLMForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFXLMWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable pipeline_model_mapping = ( { "feature-extraction": TFXLMModel, "fill-mask": TFXLMWithLMHeadModel, "question-answering": TFXLMForQuestionAnsweringSimple, "text-classification": TFXLMForSequenceClassification, "text-generation": TFXLMWithLMHeadModel, "token-classification": TFXLMForTokenClassification, "zero-shot": TFXLMForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = TFXLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFXLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = TFXLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048") input_ids = tf.convert_to_tensor([[14, 447]], dtype=tf.int32) # the president expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. 
Model might just not be made for auto-regressive inference output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
0
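A short sketch, not part of the file above, of what the tester's create_and_check_xlm_model boils down to. The config sizes mirror the tiny test configuration rather than any released checkpoint.

import tensorflow as tf
from transformers import TFXLMModel, XLMConfig

# Tiny, test-sized configuration (illustrative values only)
config = XLMConfig(
    vocab_size=99,
    emb_dim=32,
    n_layers=2,
    n_heads=4,
    n_langs=2,
    max_position_embeddings=512,
    bos_token_id=0,
)
model = TFXLMModel(config)

input_ids = tf.random.uniform((13, 7), minval=0, maxval=99, dtype=tf.int32)
langs = tf.zeros_like(input_ids)  # one language id per token, must be < n_langs
outputs = model({"input_ids": input_ids, "langs": langs})
print(outputs.last_hidden_state.shape)  # (13, 7, 32)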
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/xlm/test_tokenization_xlm.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt""" tokenizer = XLMTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_2 + [1]
0
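A condensed sketch, not part of the file above, of the fixture that test_full_tokenizer builds: the same toy vocabulary and merge table written to disk and loaded through XLMTokenizer (the expected pieces and ids are taken from that test; the slow XLM tokenizer also needs sacremoses installed).

import json
import os
import tempfile

from transformers import XLMTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
         "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

tmp = tempfile.mkdtemp()
vocab_file = os.path.join(tmp, "vocab.json")
merges_file = os.path.join(tmp, "merges.txt")
with open(vocab_file, "w") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))

tokenizer = XLMTokenizer(vocab_file, merges_file)
print(tokenizer.tokenize("lower"))                                   # ["low", "er</w>"]
print(tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))   # [14, 15, 20]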
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/xlm/test_modeling_xlm.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class XLMModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) 
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() outputs = model(input_ids) outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) result = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) 
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = XLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = XLMForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = XLMForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) all_generative_model_classes = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable pipeline_model_mapping = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, 
pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # XLM has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = XLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_xlm_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs) def test_xlm_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): # adds PAD dummy token tgt_len = min_length + idx + 1 src_len = min_length + idx + 1 expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), 
(max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): # adds PAD dummy token seq_len = min_length + idx + 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) pass @slow def test_model_from_pretrained(self): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = XLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class XLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048") model.to(torch_device) input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device) # the president expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
0
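A sketch, not part of the file above, of the slow integration test in runnable form; as the test's own TODO notes, the checkpoint was not really trained for auto-regressive generation, so the degenerate repetition is the expected output.

import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")

input_ids = torch.tensor([[14, 447]], dtype=torch.long)   # "the president", as in the test
output_ids = model.generate(input_ids, do_sample=False)   # greedy decoding
print(tokenizer.decode(output_ids[0]))  # "the president the president ...", matching the expected ids above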
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/m2m_100/test_modeling_m2m_100.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch M2M100 model. """ import copy import tempfile import unittest from transformers import M2M100Config, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder def prepare_m2m_100_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class M2M100ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def 
prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input input_ids = input_ids.clamp(self.pad_token_id + 1) decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1) config = self.get_config() inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return M2M100Config( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = M2M100Model(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = M2M100Model(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = 
outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( M2M100Model, M2M100ForConditionalGeneration, ) if is_torch_available() else () ) all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": M2M100ForConditionalGeneration, "feature-extraction": M2M100Model, "summarization": M2M100ForConditionalGeneration, "text2text-generation": M2M100ForConditionalGeneration, "translation": M2M100ForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def setUp(self): self.model_tester = M2M100ModelTester(self) self.config_tester = ConfigTester(self, config_class=M2M100Config) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (M2M100Model, M2M100ForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = M2M100ForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class M2M100ModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M") def test_inference_no_head(self): model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device) input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device) # change to 
intended input input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device) tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en") src_fr = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tokenizer(src_fr, padding=True, return_tensors="pt") hypotheses_batch = model.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), ) expected_en = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] generated = tokenizer.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == expected_en
0
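A sketch, not part of the file above, of the French-to-English path exercised by test_seq_to_seq_generation: the tokenizer is configured with src_lang/tgt_lang and the target language is forced through forced_bos_token_id.

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

src = "L'affaire NSA souligne l'absence totale de débat sur le renseignement"
batch = tokenizer(src, return_tensors="pt")
generated = model.generate(
    **batch,
    num_beams=5,
    forced_bos_token_id=tokenizer.get_lang_id("en"),  # decoder starts with the English language token
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# expected, per the integration test: "The NSA case highlights the total absence of intelligence debate"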
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/m2m_100/test_tokenization_m2m_100.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import M2M100Tokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 128022 FR_CODE = 128028 @require_sentencepiece class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = M2M100Tokenizer test_rust_tokenizer = False test_seq2seq = False test_sentencepiece = True def setUp(self): super().setUp() vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) save_dir = Path(self.tmpdirname) save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"]) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"]) tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return ( "This is a test", "This is a test", ) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "</s>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): tokenizer = self.get_tokenizer() vocab_keys = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0], "</s>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "<s>") self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip("Skip this test while all models are still to be uploaded.") def test_pretrained_model_lists(self): pass def test_full_tokenizer(self): tokenizer = self.get_tokenizer() tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], ) back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, "This is a test") @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[128022, 
110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e", ) @require_torch @require_sentencepiece @require_tokenizers class M2M100TokenizerIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/m2m100_418M" src_text = [ "In my opinion, there are two levels of response from the French government.", "NSA Affair Emphasizes Complete Lack of Debate on Intelligence", ] tgt_text = [ "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "L'affaire NSA souligne l'absence totale de débat sur le renseignement", ] # fmt: off expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] # fmt: on @classmethod def setUpClass(cls): cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained( cls.checkpoint_name, src_lang="en", tgt_lang="fr" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006) 
self.assertEqual(self.tokenizer.get_lang_id("en"), 128022) self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076) self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063) def test_get_vocab(self): vocab = self.tokenizer.get_vocab() self.assertEqual(len(vocab), self.tokenizer.vocab_size) self.assertEqual(vocab["<unk>"], 3) self.assertIn(self.tokenizer.get_lang_token("en"), vocab) def test_tokenizer_batch_encode_plus(self): self.tokenizer.src_lang = "en" ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_tokenizer_decode_ignores_language_codes(self): self.assertIn(FR_CODE, self.tokenizer.all_special_ids) # fmt: off generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_french) self.assertNotIn(self.tokenizer.eos_token, result) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(tmpdirname) new_tok = M2M100Tokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): self.tokenizer.src_lang = "en" self.tokenizer.tgt_lang = "fr" batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt") batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id ) for k in batch: batch[k] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def test_src_lang_setter(self): self.tokenizer.src_lang = "mr" self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")]) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) self.tokenizer.src_lang = "zh" self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")]) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) @require_torch def test_tokenizer_target_mode(self): self.tokenizer.tgt_lang = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")]) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) self.tokenizer.tgt_lang = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")]) self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs("A test", 
return_tensors="pt", src_lang="en", tgt_lang="ar") self.assertEqual( nested_simplify(inputs), { # en_XX, A, test, EOS "input_ids": [[128022, 58, 4183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128006, }, )
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/luke/test_tokenization_luke.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Tuple from transformers import AddedToken, LukeTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json") SAMPLE_MERGE_FILE = get_tests_dir("fixtures/merges.txt") SAMPLE_ENTITY_VOCAB = get_tests_dir("fixtures/test_entity_vocab.json") class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LukeTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() self.special_tokens_map = {"entity_token_1": "<ent>", "entity_token_2": "<ent2>"} def get_tokenizer(self, task=None, **kwargs): kwargs.update(self.special_tokens_map) tokenizer = LukeTokenizer( vocab_file=SAMPLE_VOCAB, merges_file=SAMPLE_MERGE_FILE, entity_vocab_file=SAMPLE_ENTITY_VOCAB, task=task, **kwargs, ) tokenizer.sanitize_special_tokens() return tokenizer def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.get_tokenizer() text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "Ġ", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) # , add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("studio-ousia/luke-large") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertEqual(encoded_sentence, encoded_text_from_decode) self.assertEqual(encoded_pair, encoded_pair_from_decode) def get_clean_sequence(self, tokenizer, max_length=20) -> Tuple[str, list]: txt = "Beyonce lives in Los Angeles" ids = tokenizer.encode(txt, add_special_tokens=False) return txt, ids def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." 
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest("{} ({})".format(tokenizer.__class__.__name__, pretrained_name)):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), 1)

                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_padding_entity_inputs(self):
        tokenizer = self.get_tokenizer()

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
span = (15, 34) pad_id = tokenizer.entity_vocab["[PAD]"] mask_id = tokenizer.entity_vocab["[MASK]"] encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True) self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]]) # test with a sentence with no entity encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True) self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]]) def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer() sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." spans = [(15, 34)] entities = ["East Asian language"] with self.assertRaises(ValueError): tokenizer(sentence, entities=tuple(entities), entity_spans=spans) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=tuple(spans)) with self.assertRaises(ValueError): tokenizer(sentence, entities=[0], entity_spans=spans) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=[0]) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=spans + [(0, 9)]) def test_if_tokenize_entity_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." span = (15, 34) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[span, span]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0]) def test_if_tokenize_entity_pair_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_pair_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." # head and tail information with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0]) def test_if_tokenize_entity_span_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_span_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0, 0]) @slow @require_torch class LukeTokenizerIntegrationTests(unittest.TestCase): tokenizer_class = LukeTokenizer from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() def test_single_text_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_single_text_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ] ] ) # fmt: on def test_single_text_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer( sentence, entities=entities, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_text_pair_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_text_pair_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." 
spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_text_pair_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_entity_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification") sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." 
) span = (39, 42) encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True) # test words self.assertEqual(len(encoding["input_ids"]), 42) self.assertEqual(len(encoding["attention_mask"]), 42) self.assertEqual(len(encoding["token_type_ids"]), 42) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday<ent> she<ent> could hardly believe her luck as a fortuitous" " netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][9:12], spaces_between_special_tokens=False), "<ent> she<ent>" ) # test entities self.assertEqual(encoding["entity_ids"], [2]) self.assertEqual(encoding["entity_attention_mask"], [1]) self.assertEqual(encoding["entity_token_type_ids"], [0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] ] ) # fmt: on def test_entity_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_classification", return_token_type_ids=True ) sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." ) # entity information span = (39, 42) encoding = tokenizer( sentence, entity_spans=[span], return_token_type_ids=True, padding="max_length", return_tensors="pt" ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 512)) self.assertEqual(encoding["attention_mask"].shape, (1, 512)) self.assertEqual(encoding["token_type_ids"].shape, (1, 512)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 1)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 1)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 1)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_pair_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
# head and tail information spans = [(9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed<ent> Ana Ivanovic<ent> said on Thursday<ent2> she<ent2> could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:8], spaces_between_special_tokens=False), "<ent> Ana Ivanovic<ent>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][11:14], spaces_between_special_tokens=False), "<ent2> she<ent2>" ) self.assertEqual(encoding["entity_ids"], [2, 3]) self.assertEqual(encoding["entity_attention_mask"], [1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, 12, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_entity_pair_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." # head and tail information spans = [(9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 2)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 2)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 2)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_span_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual(encoding["entity_ids"], [2, 2, 2]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on self.assertEqual(encoding["entity_start_positions"], [1, 3, 9]) self.assertEqual(encoding["entity_end_positions"], [2, 5, 9]) def test_entity_span_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) self.assertEqual(encoding["entity_start_positions"].shape, (1, 16)) self.assertEqual(encoding["entity_end_positions"].shape, (1, 16))
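# Illustrative sketch (not part of the original test file): a minimal use of the
# entity-aware encoding asserted on above, assuming the "studio-ousia/luke-base"
# checkpoint already referenced by these tests.
if __name__ == "__main__":
    from transformers import LukeTokenizer

    tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
    sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
    # character-level spans for "Ana Ivanovic" and "Thursday"
    entity_spans = [(9, 21), (30, 38)]
    encoding = tokenizer(sentence, entity_spans=entity_spans, return_tensors="pt")
    # besides input_ids/attention_mask, the encoding carries the entity tensors checked
    # above: entity_ids, entity_attention_mask, entity_position_ids
    print({k: v.shape for k, v in encoding.items()})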
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/luke/test_modeling_luke.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch LUKE model. """ import unittest from transformers import LukeConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukeTokenizer, ) from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST class LukeModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, entity_length=3, mention_length=5, use_attention_mask=True, use_token_type_ids=True, use_entity_ids=True, use_entity_attention_mask=True, use_entity_token_type_ids=True, use_entity_position_ids=True, use_labels=True, vocab_size=99, entity_vocab_size=10, entity_emb_size=6, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, num_entity_classification_labels=9, num_entity_pair_classification_labels=6, num_entity_span_classification_labels=4, use_entity_aware_attention=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.entity_length = entity_length self.mention_length = mention_length self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_entity_ids = use_entity_ids self.use_entity_attention_mask = use_entity_attention_mask self.use_entity_token_type_ids = use_entity_token_type_ids self.use_entity_position_ids = use_entity_position_ids self.use_labels = use_labels self.vocab_size = vocab_size self.entity_vocab_size = entity_vocab_size self.entity_emb_size = entity_emb_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.num_entity_classification_labels = num_entity_classification_labels self.num_entity_pair_classification_labels = 
num_entity_pair_classification_labels self.num_entity_span_classification_labels = num_entity_span_classification_labels self.scope = scope self.use_entity_aware_attention = use_entity_aware_attention self.encoder_seq_length = seq_length self.key_length = seq_length self.num_hidden_states_types = 2 # hidden_states and entity_hidden_states def prepare_config_and_inputs(self): # prepare words input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) # prepare entities entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size) entity_attention_mask = None if self.use_entity_attention_mask: entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length]) entity_token_type_ids = None if self.use_token_type_ids: entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size) entity_position_ids = None if self.use_entity_position_ids: entity_position_ids = ids_tensor( [self.batch_size, self.entity_length, self.mention_length], self.mention_length ) sequence_labels = None token_labels = None choice_labels = None entity_labels = None entity_classification_labels = None entity_pair_classification_labels = None entity_span_classification_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size) entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels) entity_pair_classification_labels = ids_tensor( [self.batch_size], self.num_entity_pair_classification_labels ) entity_span_classification_labels = ids_tensor( [self.batch_size, self.entity_length], self.num_entity_span_classification_labels ) config = self.get_config() return ( config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ) def get_config(self): return LukeConfig( vocab_size=self.vocab_size, entity_vocab_size=self.entity_vocab_size, entity_emb_size=self.entity_emb_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_entity_aware_attention=self.use_entity_aware_attention, ) def create_and_check_model( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): model = LukeModel(config=config) 
model.to(torch_device) model.eval() # test with words + entities result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size) ) # test with words only result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_entity_classification_labels model = LukeForMaskedLM(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=token_labels, entity_labels=entity_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) if entity_ids is not None: self.parent.assertEqual( result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size) ) else: self.parent.assertIsNone(result.entity_logits) def create_and_check_for_entity_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_entity_classification_labels model = LukeForEntityClassification(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=entity_classification_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels)) def create_and_check_for_entity_pair_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_entity_pair_classification_labels model = LukeForEntityClassification(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=entity_pair_classification_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 
self.num_entity_pair_classification_labels)) def create_and_check_for_entity_span_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_entity_span_classification_labels model = LukeForEntitySpanClassification(config) model.to(torch_device) model.eval() entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length) entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length) result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, entity_start_positions=entity_start_positions, entity_end_positions=entity_end_positions, labels=entity_span_classification_labels, ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels) ) def create_and_check_for_question_answering( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): model = LukeForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_labels model = LukeForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_labels = self.num_labels model = LukeForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, entity_ids=entity_ids, 
entity_attention_mask=entity_attention_mask, entity_token_type_ids=entity_token_type_ids, entity_position_ids=entity_position_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ): config.num_choices = self.num_choices model = LukeForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_attention_mask = attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_entity_ids = entity_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_entity_token_type_ids = ( entity_token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_entity_attention_mask = ( entity_attention_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() ) multiple_choice_entity_position_ids = ( entity_position_ids.unsqueeze(1).expand(-1, self.num_choices, -1, -1).contiguous() ) result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_attention_mask, token_type_ids=multiple_choice_token_type_ids, entity_ids=multiple_choice_entity_ids, entity_attention_mask=multiple_choice_entity_attention_mask, entity_token_type_ids=multiple_choice_entity_token_type_ids, entity_position_ids=multiple_choice_entity_position_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, token_type_ids, entity_ids, entity_attention_mask, entity_token_type_ids, entity_position_ids, sequence_labels, token_labels, choice_labels, entity_labels, entity_classification_labels, entity_pair_classification_labels, entity_span_classification_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "entity_ids": entity_ids, "entity_token_type_ids": entity_token_type_ids, "entity_attention_mask": entity_attention_mask, "entity_position_ids": entity_position_ids, } return config, inputs_dict @require_torch class LukeModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LukeModel, LukeForMaskedLM, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LukeModel, "fill-mask": LukeForMaskedLM, "question-answering": LukeForQuestionAnswering, "text-classification": LukeForSequenceClassification, "token-classification": LukeForTokenClassification, "zero-shot": LukeForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = True test_head_masking = True # TODO: Fix the failed tests def is_pipeline_test_to_skip( 
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name in ["QAPipelineTests", "ZeroShotClassificationPipelineTests"]: return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): entity_inputs_dict = {k: v for k, v in inputs_dict.items() if k.startswith("entity")} inputs_dict = {k: v for k, v in inputs_dict.items() if not k.startswith("entity")} inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if model_class == LukeForMultipleChoice: entity_inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if v.ndim == 2 else v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1, -1).contiguous() for k, v in entity_inputs_dict.items() } inputs_dict.update(entity_inputs_dict) if model_class == LukeForEntitySpanClassification: inputs_dict["entity_start_positions"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device ) inputs_dict["entity_end_positions"] = torch.ones( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device ) if return_labels: if model_class in ( LukeForEntityClassification, LukeForEntityPairClassification, LukeForSequenceClassification, LukeForMultipleChoice, ): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class == LukeForEntitySpanClassification: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device, ) elif model_class == LukeForTokenClassification: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) elif model_class == LukeForMaskedLM: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["entity_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = LukeModelTester(self) self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in LUKE_PRETRAINED_MODEL_ARCHIVE_LIST: model = LukeModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_masked_lm_with_word_only(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = (*config_and_inputs[:4], *((None,) * len(config_and_inputs[4:]))) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_entity_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_entity_classification(*config_and_inputs) def test_for_entity_pair_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_entity_pair_classification(*config_and_inputs) def test_for_entity_span_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_entity_span_classification(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = self.model_tester.seq_length entity_length = self.model_tester.entity_length key_length = seq_length + entity_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length + entity_length, key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = self.model_tester.num_hidden_states_types self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length + entity_length, key_length], ) def test_entity_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) entity_hidden_states = outputs.entity_hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(entity_hidden_states), expected_num_layers) entity_length = self.model_tester.entity_length self.assertListEqual( list(entity_hidden_states[0].shape[-2:]), 
[entity_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_entity_hidden_states(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] entity_hidden_states = outputs.entity_hidden_states[0] entity_hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(entity_hidden_states.grad) @require_torch class LukeModelIntegrationTests(unittest.TestCase): @slow def test_inference_base_model(self): model = LukeModel.from_pretrained("studio-ousia/luke-base").eval() model.to(torch_device) tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification") text = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." ) span = (39, 42) encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt") # move all values to device for key, value in encoding.items(): encoding[key] = encoding[key].to(torch_device) outputs = model(**encoding) # Verify word hidden states expected_shape = torch.Size((1, 42, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) # Verify entity hidden states expected_shape = torch.Size((1, 1, 768)) self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape) expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device) self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_large_model(self): model = LukeModel.from_pretrained("studio-ousia/luke-large").eval() model.to(torch_device) tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification") text = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." 
) span = (39, 42) encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt") # move all values to device for key, value in encoding.items(): encoding[key] = encoding[key].to(torch_device) outputs = model(**encoding) # Verify word hidden states expected_shape = torch.Size((1, 42, 1024)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) # Verify entity hidden states expected_shape = torch.Size((1, 1, 1024)) self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape) expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device) self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
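
# Illustrative sketch (not part of the original test suite): how `_prepare_for_class`
# above tiles 2-D entity tensors across the choice dimension for LukeForMultipleChoice.
# The batch/choice/entity sizes below are arbitrary toy values, not the tester defaults.
def _example_expand_entity_inputs_for_multiple_choice():
    import torch

    batch_size, num_choices, entity_length = 2, 4, 3
    entity_ids = torch.zeros(batch_size, entity_length, dtype=torch.long)  # a 2-D entity input
    # insert a choice axis and broadcast it, like the v.unsqueeze(1).expand(...) call above
    expanded = entity_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
    assert expanded.shape == (batch_size, num_choices, entity_length)
    return expanded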
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetV1ImageProcessor class MobileNetV1ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = MobileNetV1ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], 
return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileNetV1 model. """ import inspect import unittest from transformers import MobileNetV1Config from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV1ForImageClassification, MobileNetV1Model from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetV1ImageProcessor class MobileNetV1ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV1ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.min_depth = min_depth self.tf_padding = tf_padding self.last_hidden_size = int(last_hidden_size * depth_multiplier) self.output_stride = output_stride self.hidden_act = hidden_act self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV1Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV1Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( 
result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV1ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileNetV1ModelTester(self) self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileNetV1 does not output attentions") def test_attention_outputs(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 26 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for 
model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileNetV1Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV1ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
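
# Illustrative sketch (not part of the original test suite): the feature-map shape
# create_and_check_model above expects from MobileNetV1Model. The numbers restate the
# MobileNetV1ModelTester defaults purely to make the arithmetic explicit.
def _example_expected_last_hidden_state_shape():
    batch_size, image_size = 13, 32
    depth_multiplier, base_hidden_size, output_stride = 0.25, 1024, 32

    last_hidden_size = int(base_hidden_size * depth_multiplier)  # 1024 * 0.25 -> 256
    spatial_size = image_size // output_stride                   # 32 // 32 -> 1
    expected_shape = (batch_size, last_hidden_size, spatial_size, spatial_size)
    assert expected_shape == (13, 256, 1, 1)
    return expected_shape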
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import tempfile import unittest from datasets import load_dataset from packaging import version from transformers import DonutProcessor, TrOCRProcessor from transformers.testing_utils import ( require_sentencepiece, require_torch, require_vision, slow, to_2tuple, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bart.test_modeling_bart import BartModelTester from ..bert.test_modeling_bert import BertModelTester from ..deit.test_modeling_deit import DeiTModelTester from ..swin.test_modeling_swin import SwinModelTester from ..trocr.test_modeling_trocr import TrOCRStandaloneDecoderModelTester from ..vit.test_modeling_vit import ViTModelTester if is_torch_available(): import numpy as np import torch from transformers import ( AutoTokenizer, BartForCausalLM, BertLMHeadModel, DeiTModel, SwinModel, TrOCRForCausalLM, VisionEncoderDecoderConfig, VisionEncoderDecoderModel, ViTModel, ) from transformers.modeling_outputs import BaseModelOutput if is_vision_available(): import PIL from PIL import Image from transformers import ViTImageProcessor @require_torch class EncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_encoder_decoder_model_from_pretrained_configs( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = VisionEncoderDecoderModel(encoder_decoder_config) enc_dec_model.to(torch_device) enc_dec_model.eval() self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, 
(decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, pixel_values=None, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname) enc_dec_model.to(torch_device) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_save_and_load_encoder_decoder_model( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) enc_dec_model.eval() with torch.no_grad(): outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname: enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname) enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname) VisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=encoder_tmp_dirname, decoder_pretrained_model_name_or_path=decoder_tmp_dirname, ) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) 
self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, pixel_values=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(encoder_model.config.image_size) patch_size = to_2tuple(encoder_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate(self, config, decoder_config, pixel_values=None, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) # Generate until max length if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None enc_dec_model.to(torch_device) inputs = pixel_values # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( inputs, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual(generated_output.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def test_encoder_decoder_model(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = 
self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_save_and_load_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load_encoder_decoder_model(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def test_training_gradient_checkpointing(self): inputs_dict = self.prepare_config_and_inputs() encoder_model, decoder_model = self.get_encoder_decoder_model( inputs_dict["config"], inputs_dict["decoder_config"] ) model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 model.config.pad_token_id = 0 model_inputs = { "pixel_values": inputs_dict["pixel_values"], "labels": inputs_dict["labels"], "decoder_input_ids": inputs_dict["decoder_input_ids"], } loss = model(**model_inputs).loss loss.backward() @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = VisionEncoderDecoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class DeiT2RobertaModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-deit", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.encoder.config.num_channels, model.encoder.config.image_size, model.encoder.config.image_size, ] ) # for DEiT, the sequence length is equal to the number of patches + 2 (for the [CLS] and distillation tokens) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "pixel_values": pixel_values, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def check_encoder_decoder_model_output_attentions( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, pixel_values=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, 
decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) image_size = to_2tuple(encoder_model.config.image_size) patch_size = to_2tuple(encoder_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def get_encoder_decoder_model(self, config, decoder_config): encoder_model = DeiTModel(config).eval() decoder_model = BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): bert_model_tester = BertModelTester(self) deit_model_tester = DeiTModelTester(self) encoder_config_and_inputs = deit_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() config, pixel_values, _ = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class ViT2BertModelTest(EncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.encoder.config.num_channels, model.encoder.config.image_size, model.encoder.config.image_size, ] ) # for ViT, the sequence length is equal to the number of patches + 1 (for the [CLS] token) decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "pixel_values": pixel_values, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = ViTModel(config).eval() decoder_model = 
BertLMHeadModel(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): vit_model_tester = ViTModelTester(self) bert_model_tester = BertModelTester(self) encoder_config_and_inputs = vit_model_tester.prepare_config_and_inputs() decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder() config, pixel_values, _ = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_token_type_ids, decoder_input_mask, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_attention_mask, _, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_token_type_ids": decoder_token_type_ids, "decoder_attention_mask": decoder_input_mask, "decoder_sequence_labels": decoder_sequence_labels, "decoder_token_labels": decoder_token_labels, "decoder_choice_labels": decoder_choice_labels, "labels": decoder_token_labels, } @require_torch class Swin2BartModelTest(EncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = SwinModel(config).eval() decoder_model = BartForCausalLM(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = SwinModelTester(self, batch_size=13, embed_dim=32) model_tester_decoder = BartModelTester(self, batch_size=13, hidden_size=32, max_position_embeddings=512) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() config, pixel_values, _ = encoder_config_and_inputs decoder_config, decoder_inputs_dict = decoder_config_and_inputs decoder_inputs_dict["labels"] = decoder_inputs_dict["decoder_input_ids"] # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, **decoder_inputs_dict, } def check_encoder_decoder_model_output_attentions( self, config, decoder_config, decoder_input_ids, decoder_attention_mask, labels=None, pixel_values=None, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) enc_dec_model.to(torch_device) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) # in Swin, the seq_len equals: seq_len = encoder_model.config.window_size**2 self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads[0], seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) 
self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) encoder_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, encoder_seq_len), ) # there are no published pretrained BART-causal checkpoints for now def test_real_model_save_load_from_pretrained(self): pass @require_torch class ViT2TrOCR(EncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = ViTModel(config).eval() decoder_model = TrOCRForCausalLM(decoder_config).eval() return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = ViTModelTester(self, batch_size=13) model_tester_decoder = TrOCRStandaloneDecoderModelTester( self, batch_size=13, d_model=32, max_position_embeddings=512 ) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() config, pixel_values, _ = encoder_config_and_inputs (decoder_config, decoder_input_ids, decoder_attention_mask, _) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "labels": decoder_input_ids, } # there are no published pretrained TrOCR checkpoints for now def test_real_model_save_load_from_pretrained(self): pass @require_vision @require_torch class TrOCRModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") if is_vision_available() else None @slow def test_inference_handwritten(self): model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten").to(torch_device) dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="test") image = Image.open(dataset[0]["file"]).convert("RGB") processor = self.default_processor pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # forward pass decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]).to(torch_device) outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ).to(torch_device) self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4)) @slow def test_inference_printed(self): model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed").to(torch_device) dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="test") image = Image.open(dataset[1]["file"]).convert("RGB") processor = self.default_processor pixel_values = processor(images=image, 
return_tensors="pt").pixel_values.to(torch_device) # forward pass decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]).to(torch_device) outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) self.assertEqual(outputs.logits.shape, expected_shape) is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0") if is_pillow_less_than_9: expected_slice = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210], device=torch_device, ) else: expected_slice = torch.tensor( [-5.6844, -5.8372, 1.1518, -6.8984, 6.8587, -2.4453, 1.2347, -1.0241, -1.9649, -3.9109], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4)) @require_vision @require_torch class ViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = VisionEncoderDecoderModel.from_pretrained(loc) model.to(torch_device) model.eval() # We will verify our results on an image of cute cats img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = image_processor(images=img, return_tensors="pt").pixel_values.to(torch_device) decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]]).to(torch_device) with torch.no_grad(): logits = model(pixel_values, decoder_input_ids)[0].detach().cpu().numpy() # verify the logits expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705807, -30.639929, -31.41903, -39.012012, -38.38696, -34.887207, -33.290855, -35.68447, -38.508484, -36.124645, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate( pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True, output_scores=True ) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds, outputs.sequences_scores.detach().cpu().numpy() preds, scores = generate_step(pixel_values) EXPECTED_SCORES = np.array([-0.59562886]) max_diff = np.amax(np.abs(scores - EXPECTED_SCORES)) self.assertLessEqual(max_diff, 1e-4) # should produce # ["a cat laying on top of a couch next to another cat"] self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"]) @require_vision @require_torch @require_sentencepiece class DonutModelIntegrationTest(unittest.TestCase): @slow def test_inference_docvqa(self): processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa").to( torch_device ) dataset = load_dataset("hf-internal-testing/example-documents", split="test") image = dataset[0]["image"] pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) decoder_input_ids = processor.tokenizer( "<s_docvqa>", add_special_tokens=False, return_tensors="pt" ).input_ids.to(torch_device) # step 1: single forward pass with torch.no_grad(): outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits # 
verify the logits expected_shape = torch.Size([1, 1, 57532]) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([24.3873, -6.4491, 32.5394]).to(torch_device) self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) # step 2: generation task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" question = "When is the coffee break?" prompt = task_prompt.replace("{user_input}", question) decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids decoder_input_ids = decoder_input_ids.to(torch_device) outputs = model.generate( pixel_values, decoder_input_ids=decoder_input_ids, max_length=model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=processor.tokenizer.pad_token_id, eos_token_id=processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[processor.tokenizer.unk_token_id]], output_scores=True, return_dict_in_generate=True, ) sequence = processor.batch_decode(outputs.sequences)[0] sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token # verify generated sequence self.assertEqual( sequence, "<s_question> When is the coffee break?</s_question><s_answer> 11-14 to 11:39 a.m.</s_answer>" ) # verify scores self.assertEqual(len(outputs.scores), 11) self.assertTrue( torch.allclose( outputs.scores[0][0, :3], torch.tensor([5.6019, -3.5070, 13.7123], device=torch_device), atol=1e-4 ) ) @slow def test_inference_cordv2(self): processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2").to( torch_device ) dataset = load_dataset("hf-internal-testing/example-documents", split="test") image = dataset[2]["image"] pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) decoder_input_ids = processor.tokenizer( "<s_cord-v2>", add_special_tokens=False, return_tensors="pt" ).input_ids.to(torch_device) # step 1: single forward pass with torch.no_grad(): outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-27.4344, -3.2686, -19.3524], device=torch_device) self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) # step 2: generation task_prompt = "<s_cord-v2>" decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids decoder_input_ids = decoder_input_ids.to(torch_device) outputs = model.generate( pixel_values, decoder_input_ids=decoder_input_ids, max_length=model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=processor.tokenizer.pad_token_id, eos_token_id=processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[processor.tokenizer.unk_token_id]], output_scores=True, return_dict_in_generate=True, ) sequence = processor.batch_decode(outputs.sequences)[0] sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token # verify generated sequence # fmt: off expected_sequence = "<s_menu><s_nm> 
CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total>" # noqa: E231 # fmt: on self.assertEqual(sequence, expected_sequence) # verify scores self.assertEqual(len(outputs.scores), 43) self.assertTrue( torch.allclose( outputs.scores[0][0, :3], torch.tensor([-27.4344, -3.2686, -19.3524], device=torch_device), atol=1e-4 ) ) @slow def test_inference_rvlcdip(self): processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip") model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip").to( torch_device ) dataset = load_dataset("hf-internal-testing/example-documents", split="test") image = dataset[1]["image"] pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # step 1: single forward pass decoder_input_ids = processor.tokenizer( "<s_rvlcdip>", add_special_tokens=False, return_tensors="pt" ).input_ids.to(torch_device) with torch.no_grad(): outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device) self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) # step 2: generation task_prompt = "<s_rvlcdip>" decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids decoder_input_ids = decoder_input_ids.to(torch_device) outputs = model.generate( pixel_values, decoder_input_ids=decoder_input_ids, max_length=model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=processor.tokenizer.pad_token_id, eos_token_id=processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[processor.tokenizer.unk_token_id]], output_scores=True, return_dict_in_generate=True, ) sequence = processor.batch_decode(outputs.sequences)[0] sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token # verify generated sequence self.assertEqual(sequence, "<s_class><advertisement/></s_class>") # verify scores self.assertEqual(len(outputs.scores), 4) self.assertTrue( torch.allclose( outputs.scores[0][0, :3], torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device), atol=1e-4 ) )
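
# Illustrative sketch (not part of the original test suite): the encoder sequence lengths
# that the attention-shape checks above compute. Only the formulas mirror the tests
# (ViT: patches + 1 CLS token; DeiT: patches + 2 for CLS and distillation tokens;
# Swin cross-attention: (image_size / patch_size)^2 / 4^(num_stages - 1));
# the concrete config values are arbitrary toy numbers.
def _example_encoder_sequence_lengths():
    image_size, patch_size = 224, 16
    num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196

    vit_seq_len = num_patches + 1   # 197
    deit_seq_len = num_patches + 2  # 198

    swin_image_size, swin_patch_size, swin_depths = 32, 4, (2, 2, 6, 2)
    swin_cross_attn_seq_len = ((swin_image_size // swin_patch_size) ** 2) // (4 ** (len(swin_depths) - 1))  # 1
    return vit_seq_len, deit_seq_len, swin_cross_attn_seq_len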
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/vision_encoder_decoder/test_modeling_flax_vision_encoder_decoder.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from transformers import is_flax_available, is_torch_available, is_vision_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_vision, slow, torch_device from ...test_modeling_flax_common import floats_tensor, ids_tensor from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( AutoTokenizer, FlaxGPT2LMHeadModel, FlaxVisionEncoderDecoderModel, FlaxViTModel, VisionEncoderDecoderConfig, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionEncoderDecoderModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor @require_flax class FlaxEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_from_pretrained( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], 
pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_save_and_load( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = FlaxVisionEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_output_attentions( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual(encoder_attentions[0].shape[-3:-2], (config.num_attention_heads,)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:-1], (decoder_config.num_attention_heads, cross_attention_input_seq_len), ) def check_encoder_decoder_model_generate(self, pixel_values, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) pad_token_id = enc_dec_model.config.decoder.pad_token_id eos_token_id = enc_dec_model.config.decoder.eos_token_id decoder_start_token_id = enc_dec_model.config.decoder.decoder_start_token_id # Copied from generation.utils (GPT2 doesn't have
`pad_token_id`) if pad_token_id is None and eos_token_id is not None: pad_token_id = eos_token_id if decoder_start_token_id is None: decoder_start_token_id = enc_dec_model.config.decoder.bos_token_id # Bert does not have a bos token id, so use pad_token_id instead # Copied from `test_modeling_encoder_decoder.py` if decoder_start_token_id is None: decoder_start_token_id = pad_token_id generated_output = enc_dec_model.generate( pixel_values, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, ) generated_sequences = generated_output.sequences self.assertEqual(generated_sequences.shape, (pixel_values.shape[0],) + (decoder_config.max_length,)) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() # prepare inputs flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(), 1e-5) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 1e-5) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 1e-5) def check_equivalence_pt_to_flax(self, config, decoder_config, inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = VisionEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, config, decoder_config, inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = VisionEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxVisionEncoderDecoderModel(encoder_decoder_config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained(self): config_inputs_dict = 
self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=True) def test_save_and_load_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_save_and_load(**config_inputs_dict) def test_encoder_decoder_model_output_attentions(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**config_inputs_dict) def test_encoder_decoder_model_generate(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**config_inputs_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del inputs_dict["encoder_hidden_states"] # Avoid the case where a sequence has no place to attend (after combined with the causal attention mask) batch_size = inputs_dict["decoder_attention_mask"].shape[0] inputs_dict["decoder_attention_mask"] = np.concatenate( [np.ones(shape=(batch_size, 1)), inputs_dict["decoder_attention_mask"][:, 1:]], axis=1 ) # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
decoder_config.use_cache = False self.assertTrue(decoder_config.cross_attention_hidden_size is None) # check without `enc_to_dec_proj` projection self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() pixel_values = floats_tensor( [ 13, model_2.config.encoder.num_channels, model_2.config.encoder.image_size, model_2.config.encoder.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) outputs = model_2( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxVisionEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_flax class FlaxViT2GPT2EncoderDecoderModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxViTModel(config) decoder_model = FlaxGPT2LMHeadModel(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxViTModelTester(self, batch_size=13) model_tester_decoder = FlaxGPT2ModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, pixel_values) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, # This is not used in the tests. 
} def get_pretrained_model(self): return FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) @require_flax class FlaxVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( "google/vit-base-patch16-224-in21k", "gpt2" ) def _check_configuration_tie(self, model): module = model.module.bind(model.params) assert id(module.decoder.config) == id(model.config.decoder) assert id(module.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class FlaxViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = FlaxVisionEncoderDecoderModel.from_pretrained(loc) img = prepare_img() pixel_values = image_processor(images=img, return_tensors="np").pixel_values decoder_input_ids = np.array([[model.config.decoder_start_token_id]]) logits = model(pixel_values, decoder_input_ids)[0] logits = np.array(logits) # verify the logits expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705837, -30.639936, -31.41905, -39.01204, -38.38698, -34.887215, -33.29087, -35.684475, -38.50852, -36.124676, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate(pixel_values, max_length=16, num_beams=4) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds, outputs.scores preds, scores = generate_step(pixel_values) EXPECTED_SCORES = np.array([-0.59563464]) scores = np.array(scores) max_diff = np.amax(np.abs(scores - EXPECTED_SCORES)) self.assertLessEqual(max_diff, 1e-4) # should produce # ["a cat laying on top of a couch next to another cat"] self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
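As a quick reference for the integration test above, the same checkpoint can be exercised outside the unittest harness roughly as follows. This is a minimal sketch based only on the calls made in FlaxViT2GPT2ModelIntegrationTest.test_inference_coco_en: the ydshieh/vit-gpt2-coco-en checkpoint, the COCO fixture image, and the generation settings (4 beams, max length 16) all come from that test; it is not an additional test in the file.

# Minimal captioning sketch mirroring test_inference_coco_en above (assumptions noted in the lead-in).
from PIL import Image
from transformers import AutoTokenizer, FlaxVisionEncoderDecoderModel, ViTImageProcessor

loc = "ydshieh/vit-gpt2-coco-en"
image_processor = ViTImageProcessor.from_pretrained(loc)
tokenizer = AutoTokenizer.from_pretrained(loc)
model = FlaxVisionEncoderDecoderModel.from_pretrained(loc)

# Same COCO fixture image the test loads via prepare_img().
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="np").pixel_values

# Beam search with the settings used in the test, then strip special tokens.
outputs = model.generate(pixel_values, max_length=16, num_beams=4)
captions = [pred.strip() for pred in tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)]
# The integration test expects: ["a cat laying on top of a couch next to another cat"]
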
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow VisionEncoderDecoder model. """ from __future__ import annotations import copy import os import tempfile import unittest import numpy as np from transformers import is_tf_available, is_torch_available, is_vision_available from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_vision, slow, torch_device, ) from transformers.utils.generic import ModelOutput from ...test_modeling_tf_common import floats_tensor, ids_tensor from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): import tensorflow as tf from transformers import ( AutoConfig, AutoImageProcessor, AutoTokenizer, TFAutoModel, TFAutoModelForCausalLM, TFGPT2LMHeadModel, TFVisionEncoderDecoderModel, TFViTModel, VisionEncoderDecoderConfig, ) from transformers.modeling_tf_outputs import TFBaseModelOutput if is_torch_available(): import torch from transformers import GPT2LMHeadModel, VisionEncoderDecoderModel, ViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor @require_tf class TFVisionEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = TFVisionEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, 
decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_hidden_states) outputs_encoder_decoder = enc_dec_model( pixel_values=None, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_from_pretrained( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, return_dict=True, kwargs=kwargs, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_save_and_load( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, kwargs=kwargs, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def check_encoder_decoder_model_labels( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, labels, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, kwargs=kwargs, ) # Make sure `loss` exist 
self.assertIn("loss", outputs_encoder_decoder) batch_size, seq_len = decoder_input_ids.shape expected_shape = (batch_size, seq_len, decoder_config.vocab_size) self.assertEqual(outputs_encoder_decoder["logits"].shape, expected_shape) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[0], pixel_values.shape[0]) self.assertEqual(outputs_encoder_decoder["encoder_last_hidden_state"].shape[-1], config.hidden_size) def check_encoder_decoder_model_output_attentions( self, config, pixel_values, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) outputs_encoder_decoder = enc_dec_model( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, kwargs=kwargs, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) self.assertEqual(encoder_attentions[0].shape[-3:-2], (config.num_attention_heads,)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] * ( 1 + (decoder_config.ngram if hasattr(decoder_config, "ngram") else 0) ) self.assertEqual( cross_attentions[0].shape[-3:-1], (decoder_config.num_attention_heads, cross_attention_input_seq_len), ) def check_encoder_decoder_model_generate(self, pixel_values, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = TFVisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) # Generate until max length if hasattr(enc_dec_model.config, "eos_token_id"): enc_dec_model.config.eos_token_id = None if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"): enc_dec_model.config.decoder.eos_token_id = None # Bert does not have a bos token id, so use pad_token_id instead generated_output = enc_dec_model.generate( pixel_values, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id ) self.assertEqual( tuple(generated_output.shape.as_list()), (pixel_values.shape[0],) + (decoder_config.max_length,) ) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. 
For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `names` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `names` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if type(key) == bool: pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict): """Wrap `check_pt_tf_models` to further check PT -> TF again""" self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # PT -> TF with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # All models tested in this file have attentions encoder_decoder_config.output_attentions = True pt_model = VisionEncoderDecoderModel(encoder_decoder_config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict): encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) # Output all for aggressive testing encoder_decoder_config.output_hidden_states = True # TODO: A generalizable way to determine this attribute encoder_decoder_config.output_attentions = True tf_model = TFVisionEncoderDecoderModel(encoder_decoder_config) # Make sure model is built before saving tf_model(**tf_inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: tf_model.save_pretrained(tmpdirname) pt_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True) self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict) def 
test_encoder_decoder_model(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**config_inputs_dict) def test_encoder_decoder_model_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**config_inputs_dict, return_dict=True) def test_save_and_load_from_pretrained(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_save_and_load(**config_inputs_dict) def test_encoder_decoder_model_labels(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_labels(**config_inputs_dict) def test_encoder_decoder_model_output_attentions(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**config_inputs_dict) def test_encoder_decoder_model_generate(self): config_inputs_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**config_inputs_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).") @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() labels = config_inputs_dict.pop("decoder_token_labels") # Keep only common arguments arg_names = [ "config", "pixel_values", "decoder_config", "decoder_input_ids", "decoder_attention_mask", "encoder_hidden_states", ] config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names} config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") # Output all for aggressive testing config.output_hidden_states = True decoder_config.output_hidden_states = True # All models tested in this file have attentions config.output_attentions = True decoder_config.output_attentions = True tf_inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del tf_inputs_dict["encoder_hidden_states"] # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. for k in ["decoder_attention_mask"]: attention_mask = tf_inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. 
attention_mask = tf.concat( [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1 ) tf_inputs_dict[k] = attention_mask tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict) tf_inputs_dict_with_labels["labels"] = labels self.assertTrue(decoder_config.cross_attention_hidden_size is None) # Original test: check without `labels` and without `enc_to_dec_proj` projection self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) # check with `labels` self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict) self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() pixel_values = floats_tensor( [ 13, model_2.config.encoder.num_channels, model_2.config.encoder.image_size, model_2.config.encoder.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) outputs = model_2( pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFViT2GPT2EncoderDecoderModelTest(TFVisionEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFViTModel(config, name="encoder") decoder_model = TFGPT2LMHeadModel(decoder_config, name="decoder") return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = TFViTModelTester(self, batch_size=13) model_tester_decoder = TFGPT2ModelTester(self) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, pixel_values, labels) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, decoder_head_mask, decoder_token_type_ids, decoder_sequence_labels, decoder_token_labels, decoder_choice_labels, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True # disable cache for now decoder_config.use_cache = False return { "config": config, "pixel_values": pixel_values, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "decoder_token_labels": decoder_token_labels, "encoder_hidden_states": encoder_hidden_states, # This is not used in the tests. 
"labels": decoder_token_labels, } @require_tf class TFVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_decoder_config(self): config = AutoConfig.from_pretrained("gpt2") config.is_decoder = True config.add_cross_attention = True return config def get_encoderdecoder_model(self): return TFVisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en") def get_encoder_decoder_models(self): encoder_model = TFViTModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") decoder_model = TFGPT2LMHeadModel.from_pretrained("gpt2", config=self.get_decoder_config(), name="decoder") return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): assert id(model.decoder.config) == id(model.config.decoder) assert id(model.encoder.config) == id(model.config.encoder) @slow def test_configuration_tie(self): model = self.get_from_encoderdecoder_pretrained_model() self._check_configuration_tie(model) model = TFVisionEncoderDecoderModel(**self.get_encoder_decoder_models()) self._check_configuration_tie(model) model = self.get_encoderdecoder_model() self._check_configuration_tie(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf class TFVisionEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k") decoder_config = AutoConfig.from_pretrained("gpt2", is_decoder=True, add_cross_attention=True) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): encoder_config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-vit") decoder_config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-gpt2", is_decoder=True, add_cross_attention=True ) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def test_encoder_decoder_save_load_from_encoder_decoder(self): config = self.get_encoder_decoder_config_small() # create two random ViT/GPT2 models for vit-gpt2 & initialize weights (+cross_attention weights) encoder = TFViTModel(config.encoder) encoder.build() decoder = TFGPT2LMHeadModel(config.decoder) decoder.build() encoder_decoder_orig = TFVisionEncoderDecoderModel(encoder=encoder, decoder=decoder) pixel_values = floats_tensor( [ 13, encoder.config.num_channels, encoder.config.image_size, encoder.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder.config.vocab_size) logits_orig = encoder_decoder_orig(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits with tempfile.TemporaryDirectory() as tmp_dirname: encoder_path = os.path.join(tmp_dirname, "encoder") decoder_path = os.path.join(tmp_dirname, "decoder") encoder.save_pretrained(encoder_path) decoder.save_pretrained(decoder_path) encoder_decoder = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_path, decoder_path) logits_1 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits self.assertTrue(logits_orig.numpy().sum() - logits_1.numpy().sum() < 1e-3) max_diff = np.max(np.abs(logits_1.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) with 
tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder.save_pretrained(tmp_dirname) encoder_decoder = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_2 = encoder_decoder(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=4) @require_torch @is_pt_tf_cross_test def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self): config = self.get_encoder_decoder_config_small() # create two random ViT/GPT2 models for vit-gpt2 & initialize weights (+cross_attention weights) encoder_pt = ViTModel(config.encoder).to(torch_device).eval() decoder_pt = GPT2LMHeadModel(config.decoder).to(torch_device).eval() encoder_decoder_pt = VisionEncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval() pixel_values = floats_tensor( [ 13, encoder_pt.config.num_channels, encoder_pt.config.image_size, encoder_pt.config.image_size, ] ) decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size) pt_pixel_values = torch.tensor(pixel_values.numpy(), device=torch_device, dtype=torch.float) pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long) logits_pt = encoder_decoder_pt(pixel_values=pt_pixel_values, decoder_input_ids=pt_decoder_input_ids).logits # PyTorch => TensorFlow with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2: encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1) encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( tmp_dirname_1, tmp_dirname_2, encoder_from_pt=True, decoder_from_pt=True ) logits_tf = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) # Make sure `from_pretrained` following `save_pretrained` work and give the same result # (See https://github.com/huggingface/transformers/pull/14016) with tempfile.TemporaryDirectory() as tmp_dirname: encoder_decoder_tf.save_pretrained(tmp_dirname) encoder_decoder_tf = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) logits_tf_2 = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy())) self.assertAlmostEqual(max_diff, 0.0, places=3) @require_vision @slow def test_encoder_decoder_from_pretrained(self): load_weight_prefix = TFVisionEncoderDecoderModel.load_weight_prefix config = self.get_encoder_decoder_config() image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2") img = prepare_img() pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: # Since most of HF's models don't have pretrained cross-attention layers, they are randomly # initialized even if we create models using `from_pretrained` method. # For the tests, the decoder need to be a model with pretrained cross-attention layers. # So we create pretrained models (without `load_weight_prefix`), save them, and later, # we load them using `from_pretrained`. 
# (we don't need to do this for encoder, but let's make the code more similar between encoder/decoder) encoder = TFAutoModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") # It's necessary to specify `add_cross_attention=True` here. decoder = TFAutoModelForCausalLM.from_pretrained( "gpt2", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") encoder.save_pretrained(pretrained_encoder_dir) decoder.save_pretrained(pretrained_decoder_dir) del encoder del decoder enc_dec_model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder_dir, pretrained_decoder_dir, ) # check that the from pretrained methods work enc_dec_model.save_pretrained(tmp_dirname) enc_dec_model = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del enc_dec_model # Create the model using `__init__` with loaded ``pretrained`` encoder / decoder encoder = TFAutoModel.from_pretrained( pretrained_encoder_dir, load_weight_prefix=load_weight_prefix, name="encoder" ) decoder = TFAutoModelForCausalLM.from_pretrained( pretrained_decoder_dir, load_weight_prefix=load_weight_prefix, name="decoder" ) enc_dec_model = TFVisionEncoderDecoderModel(config=config, encoder=encoder, decoder=decoder) output = enc_dec_model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids) loss_init = output.loss max_diff = np.max(np.abs(loss_pretrained - loss_init)) expected_diff = 0.0 self.assertAlmostEqual(max_diff, expected_diff, places=4) @require_vision @require_tf class TFViT2GPT2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_coco_en(self): loc = "ydshieh/vit-gpt2-coco-en" image_processor = ViTImageProcessor.from_pretrained(loc) tokenizer = AutoTokenizer.from_pretrained(loc) model = TFVisionEncoderDecoderModel.from_pretrained(loc) # We will verify our results on an image of cute cats img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = image_processor(images=img, return_tensors="tf").pixel_values decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]]) logits = model(pixel_values, decoder_input_ids)[0].numpy() # verify the logits expected_shape = (1, 1, model.config.decoder.vocab_size) self.assertEqual(logits.shape, expected_shape) EXPECTED_LOGIT_SLICE = np.array( [ -38.705807, -30.639929, -31.41903, -39.012012, -38.38696, -34.887207, -33.290855, -35.68447, -38.508484, -36.124645, ] ) max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE)) self.assertLessEqual(max_diff, 1e-4) def generate_step(pixel_values): outputs = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True) output_ids = outputs.sequences preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds preds = generate_step(pixel_values) # should produce # ["a cat laying on top of a couch next to another cat"] self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
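The TF tests above lean heavily on TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained, which stitches a pretrained vision encoder onto a pretrained causal LM decoder and adds randomly initialized cross-attention weights. Below is a minimal sketch of that flow, assuming the same public google/vit-base-patch16-224-in21k and gpt2 checkpoints, the COCO fixture image, and the "Linda Davis" prompt used in test_encoder_decoder_from_pretrained; it is illustrative only, not part of the test file.

# Minimal from_encoder_decoder_pretrained sketch (assumptions noted in the lead-in).
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer, TFVisionEncoderDecoderModel

model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "gpt2"
)
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
decoder_input_ids = tokenizer("Linda Davis", return_tensors="tf").input_ids

# Teacher-forced forward pass; passing labels also populates outputs.loss
# (TF models keep the loss unreduced, as noted in check_pt_tf_models above).
outputs = model(pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids)
print(outputs.logits.shape)  # (batch_size, decoder_sequence_length, vocab_size)

Because from_encoder_decoder_pretrained initializes the cross-attention weights randomly, the slow test above first saves and reloads the decoder so that both construction paths share identical weights before it compares their losses.
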
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/longt5/test_modeling_longt5.py
# coding=utf-8 # Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import LongT5Config, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, AutoTokenizer, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, ) from transformers.models.longt5.modeling_longt5 import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.pytorch_utils import is_torch_less_than_1_11 class LongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers self.large_model_config_path = large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, 
self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return LongT5Config( vocab_size=166, # longt5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def get_config(self): return LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = 
result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], 
dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = LongT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [LongT5Model, LongT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( 
input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (LongT5ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": LongT5ForConditionalGeneration, "feature-extraction": LongT5Model, "summarization": LongT5ForConditionalGeneration, "text2text-generation": LongT5ForConditionalGeneration, "translation": LongT5ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_torchscript = True test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True def setUp(self): self.model_tester = LongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
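# NOTE: the attention-mask variant invoked below reuses the same config/inputs; it re-runs the
# past-key-values consistency check with the second half of the prompt masked out and one masked
# token randomly altered, so cached and non-cached decoder outputs must still agree on the last position.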
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LongT5Model.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skipIf( not is_torch_available() or is_torch_less_than_1_11, "Test failed with torch < 1.11 with an exception in a C++ file.", ) @slow def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = LongT5Model(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/longt5_test.onnx", export_params=True, opset_version=13, input_names=["input_ids", "decoder_input_ids"], ) def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] max_length = config_and_inputs[1].shape[-1] + 3 model = LongT5ForConditionalGeneration(config).eval() model.to(torch_device) head_masking = { "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device), "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device), } for attn_name, (name, mask) in zip(attention_names, head_masking.items()): head_masks = {name: mask} # Explicitly pass decoder_head_mask as it is required from LONGT5 model when head_mask specified if name == "head_mask": head_masks["decoder_head_mask"] = torch.ones( config.num_decoder_layers, config.num_heads, device=torch_device ) out = model.generate( config_and_inputs[1], num_beams=1, max_length=max_length, output_attentions=True, return_dict_in_generate=True, **head_masks, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def test_attention_outputs(self): if not self.has_attentions: 
pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, 
len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): block_len = getattr(self.model_tester, "block_len", None) encoder_expected_shape = (batch_size, 1, config.num_attention_heads, block_len, 3 * block_len) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) @require_torch class LongT5TGlobalModelTest(LongT5ModelTest): def setUp(self): self.model_tester = LongT5ModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_len = encoder_seq_length // global_block_size if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, 
correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_length = seq_length // global_block_size encoder_expected_shape = ( batch_size, 1, config.num_attention_heads, block_len, 3 * block_len + global_seq_length, ) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) class LongT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, large_model_config_path="google/long-t5-local-large", ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training self.large_model_config_path = 
large_model_config_path def get_large_model_config(self): return LongT5Config.from_pretrained(self.large_model_config_path) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = LongT5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class LongT5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (LongT5EncoderModel,) if is_torch_available() else () test_pruning = False test_torchscript = True test_resize_embeddings = False test_model_parallel = False def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", 4) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) class LongT5EncoderOnlyTGlobalModelTest(LongT5EncoderOnlyModelTest): def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester( self, encoder_attention_type="transient-global", large_model_config_path="google/long-t5-tglobal-large" ) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True block_len = getattr(self.model_tester, "block_len", None) seq_len = getattr(self.model_tester, "seq_length", None) global_block_size = getattr(self.model_tester, "global_block_size", 4) global_seq_len = seq_len // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch @require_sentencepiece @require_tokenizers class LongT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps").to( torch_device ) @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") def expected_summary(self): return [ "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in" " developing world . it provides an excellent resolution for visualization of the coronaryarteries for" " catheter - based or operating interventions . although the association of this technique with major" " complications such as mortality is highly uncommon , it is frequently associated with various cardiac" " and noncardiac complications.materials and methods : in aortic stenosis , we aimed to report the" " diagnostic performance of 128-slice computed tomography coronary angiogram in 50 patients undergoing for" " major noncoron ary cardiac surgery referred" ] @slow def test_summarization(self): model = self.model tok = self.tokenizer ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . 
\n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse nt for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . 
\n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the di agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of signif icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . 
\n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermed iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat ients planned for noncoronary cardiac surgery . 
\n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """ dct = tok( [ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( self.expected_summary(), decoded, ) @slow def test_inference_hidden_states(self): model = self.model input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) decoder_input_ids = torch.tensor( [[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) attention_mask = torch.tensor( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.long, device=torch_device, ) output = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=True ) # check if encoder_outputs match expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device) self.assertTrue(torch.allclose(output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, atol=1e-4)) # check if logits match expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device) self.assertTrue(torch.allclose(output.logits[0, 0, :5], expected_output_slice, atol=1e-4))
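# NOTE: the expected summary and the expected hidden-state / logits slices above are tied to the
# "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" checkpoint used by these integration tests;
# they have to be regenerated if that checkpoint or the generation arguments change.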
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/longt5/test_modeling_flax_longt5.py
# coding=utf-8 # Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import is_flax_available from transformers.models.auto import get_values from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_sentencepiece, require_tokenizers, slow, ) from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_MAPPING, AutoTokenizer, LongT5Config from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.longt5.modeling_flax_longt5 import ( FlaxLongT5ForConditionalGeneration, FlaxLongT5Model, shift_tokens_right, ) class FlaxLongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = 
ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) config = LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): model = FlaxLongT5Model(config=config) result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)) def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(input_ids) # prevent fully zero'd out attention mask decoder_attention_mask = jnp.ones_like(decoder_attention_mask) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_flax class FlaxLongT5ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = 
(FlaxLongT5Model, FlaxLongT5ForConditionalGeneration) if is_flax_available() else () all_generative_model_classes = (FlaxLongT5ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True def setUp(self): self.model_tester = FlaxLongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_use_cache_forward_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, *config_and_inputs) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_shift_right(self): decoder_start_token_id = 0 pad_token_id = 1 labels = np.arange(2, 102).reshape(5, 20) labels[:2, 15:] = -100 decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id) np_decoder_input_ids = np.array(decoder_input_ids) padded_slice = np_decoder_input_ids[:2, (15 + 1) :] self.assertTrue((padded_slice == 1).all()) not_padded_slice = np_decoder_input_ids[2:, 1:] rolled_labels = np.roll(labels[2:], 1)[:, 1:] 
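# rows whose labels contain no -100 should simply be the original labels shifted right by one
# position, with decoder_start_token_id (0 here) prepended in the first column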
self.assertTrue((not_padded_slice == rolled_labels).all()) self.assertTrue((np_decoder_input_ids[:, 0] == 0).all()) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) block_len = getattr(self.model_tester, "block_len", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions 
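# (the LongT5 decoder uses standard full self-attention, so these shapes are expressed in
# decoder_seq_length / decoder_key_length rather than in block_len as in the encoder checks above)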
decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = 
(base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") class FlaxLongT5TGlobalModelTest(FlaxLongT5ModelTest): def setUp(self): self.model_tester = FlaxLongT5ModelTester(self, encoder_attention_type="transient-global") self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_len = encoder_seq_length // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions 
self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) @require_sentencepiece @require_tokenizers @require_flax class FlaxLongT5ModelIntegrationTests(unittest.TestCase): model_path = "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" def expected_summary(self): return [ "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in" " developing world . it provides an excellent resolution for visualization of the coronary arteries for" " catheter - based or operating interventions . although the association of this technique with major" " complications such as mortality is highly uncommon , it is frequently associated with various cardiac" " and noncardiac complications . computed tomography coronary angiography is a promising technique for the" " evaluation of cad noninvasively . it assesses disease within the coronary artery and provides" " qualitative and quantitative information about nonobstructive atherosclerotic plaque" ] @slow def test_summarization(self): model = FlaxLongT5ForConditionalGeneration.from_pretrained(self.model_path) tok = AutoTokenizer.from_pretrained(self.model_path) ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . 
the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse nt for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . 
a s ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the di agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . 
\n the diagnostic performance of ct coronary angiography for the detection of signif icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermed iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . 
at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat ients planned for noncoronary cardiac surgery . \n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """ dct = tok( [ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="np", ) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, do_sample=False, early_stopping=True, ).sequences decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( self.expected_summary(), decoded, )
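# --- Editor's note: illustrative sketch, not part of the original test file above. ---
# Two details the FlaxLongT5 tests above rely on, spelled out for readers:
#
# 1) Attention shapes. LongT5's "local" encoder self-attention splits the sequence into
#    blocks of `block_len`; each query block attends to itself plus one neighbouring block
#    on either side, hence the expected shape [num_heads, block_len, 3 * block_len]. The
#    "transient-global" variant additionally attends to one global token per
#    `global_block_size` input tokens, adding
#    `global_seq_len = encoder_seq_length // global_block_size` keys, as checked in
#    FlaxLongT5TGlobalModelTest above.
#
# 2) `shift_tokens_right`, exercised by `test_shift_right`. A minimal NumPy sketch of the
#    behaviour being asserted (the real helper lives in
#    transformers.models.longt5.modeling_flax_longt5; this re-implementation is only for
#    illustration):
import numpy as np

def shift_tokens_right_sketch(labels: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]          # move every label one position to the right
    shifted[:, 0] = decoder_start_token_id   # the decoder always starts from this token
    # ignored label positions (-100) must become pad tokens in the decoder inputs
    return np.where(shifted == -100, pad_token_id, shifted)

# Example: labels [[2, -100, 5]] with pad_token_id=1 and decoder_start_token_id=0 give
# decoder inputs [[0, 2, 1]]: the start token is prepended and the ignored (-100) label is
# replaced by the pad token, matching the padded/rolled slices asserted in test_shift_right.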
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/marian/test_modeling_flax_marian.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import MarianConfig, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers import MarianTokenizer from transformers.models.marian.modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, shift_tokens_right def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxMarianModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def 
prepare_config_and_inputs(self): input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = MarianConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) 
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxMarianModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = (FlaxMarianModel, FlaxMarianMTModel) if is_flax_available() else () all_generative_model_classes = (FlaxMarianMTModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxMarianModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("Helsinki-NLP/opus-mt-en-de") # FlaxMarianForSequenceClassification expects eos token in 
input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs) @require_flax @require_sentencepiece @require_tokenizers class MarianIntegrationTest(unittest.TestCase): src = None tgt = None @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self): return MarianTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): model: FlaxMarianMTModel = FlaxMarianMTModel.from_pretrained(self.model_name) self.assertEqual(model.config.decoder_start_token_id, model.config.pad_token_id) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="np", **tokenizer_kwargs) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128, ).sequences generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @require_flax @require_sentencepiece @require_tokenizers class TestMarian_EN_FR(MarianIntegrationTest): src = "en" tgt = "fr" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", ] expected_text = [ "Je suis une petite grenouille.", "Maintenant, je peux oublier les 100 mots d'allemand que je connais.", ] @slow def test_batch_generation_en_fr(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_FR_EN(MarianIntegrationTest): src = "fr" tgt = "en" src_text = [ "Donnez moi le micro.", "Tom et Mary étaient assis à une table.", # Accents ] expected_text = [ "Give me the microphone.", "Tom and Mary were sitting at a table.", ] @slow def test_batch_generation_fr_en(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_MT_EN(MarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks without adjust_logits_generation overwritten""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_EN_DE(MarianIntegrationTest): src = "en" tgt = "de" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", "Tom asked his teacher for advice.", "That's how I would do it.", "Tom really admired Mary's courage.", "Turn around and close your eyes.", ] expected_text = [ "Ich bin ein kleiner Frosch.", "Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.", "Tom bat seinen Lehrer um Rat.", "So würde ich das machen.", "Tom bewunderte Marias Mut wirklich.", "Drehen Sie sich um und schließen Sie die Augen.", ] @slow def test_batch_generation_en_de(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_en_zh(MarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_eng_zho(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_RU_FR(MarianIntegrationTest): src = "ru" tgt = "fr" src_text = ["Он показал мне рукопись своей новой пьесы."] expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."] @slow def test_batch_generation_ru_fr(self): self._assert_generated_batch_equal_expected() @require_flax @require_sentencepiece @require_tokenizers class TestMarian_en_ROMANCE(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected()
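# --- Editor's note: illustrative usage sketch, not part of the original test file above. ---
# The ">>fr<<" / ">>pt<<" / ">>es<<" markers in TestMarian_en_ROMANCE are target-language
# prefix tokens: opus-mt-en-ROMANCE is multilingual on the target side, so the desired
# output language is selected by prepending such a token to the source sentence.
# A minimal sketch mirroring translate_src_text() above (slow: it downloads the checkpoint):
from transformers import FlaxMarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-ROMANCE"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = FlaxMarianMTModel.from_pretrained(model_name)

inputs = tokenizer([">>fr<< Don't spend so much time watching TV."], padding=True, return_tensors="np")
generated = model.generate(
    inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2, max_length=128
).sequences
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# Per the expected_text recorded in the test above, this decodes to
# "Ne passez pas autant de temps à regarder la télé."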
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/marian/test_modeling_marian.py
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Marian model. """ import tempfile import unittest from huggingface_hub.hf_api import list_models from transformers import MarianConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoConfig, AutoModelWithLMHead, AutoTokenizer, MarianModel, MarianMTModel, TranslationPipeline, ) from transformers.models.marian.convert_marian_to_pytorch import ( ORG_NAME, convert_hf_name_to_opus_name, convert_opus_name_to_hf_name, ) from transformers.models.marian.modeling_marian import ( MarianDecoder, MarianEncoder, MarianForCausalLM, shift_tokens_right, ) def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MarianModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, decoder_start_token_id=3, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id # forcing a certain token to be generated, sets all other tokens to -inf # if however the token to be generated is already at -inf then it can lead token # `nan` values and thus break generation self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MarianConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MarianModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MarianModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = 
model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MarianModel, MarianMTModel) if is_torch_available() else () all_generative_model_classes = (MarianMTModel,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": MarianMTModel, "feature-extraction": MarianModel, "summarization": MarianMTModel, "text-generation": MarianForCausalLM, "text2text-generation": MarianMTModel, "translation": MarianMTModel, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = MarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MarianMTModel(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_share_encoder_decoder_embeddings(self): config, input_dict = self.model_tester.prepare_config_and_inputs() # check if embeddings are shared by default for model_class in self.all_model_classes: model = model_class(config) self.assertIs(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIs(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if embeddings are not shared when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) 
self.assertIsNot(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if a model with shared embeddings can be saved and loaded with share_encoder_decoder_embeddings = False config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, share_encoder_decoder_embeddings=False) self.assertIsNot(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) def test_resize_decoder_token_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs() # check if resize_decoder_token_embeddings raises an error when embeddings are shared for model_class in self.all_model_classes: model = model_class(config) with self.assertRaises(ValueError): model.resize_decoder_token_embeddings(config.vocab_size + 1) # check if decoder embeddings are resized when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.get_decoder().embed_tokens.weight.shape, (config.vocab_size + 1, config.d_model)) # check if lm_head is also resized config, _ = self.model_tester.prepare_config_and_inputs() config.share_encoder_decoder_embeddings = False model = MarianMTModel(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.lm_head.weight.shape, (config.vocab_size + 1, config.d_model)) def test_tie_word_embeddings_decoder(self): pass def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." 
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) class ModelManagementTests(unittest.TestCase): @slow @require_torch def test_model_names(self): model_list = list_models() model_ids = [x.modelId for x in model_list if x.modelId.startswith(ORG_NAME)] bad_model_ids = [mid for mid in model_ids if "+" in mid] self.assertListEqual([], bad_model_ids) self.assertGreater(len(model_ids), 500) @require_torch @require_sentencepiece @require_tokenizers class MarianIntegrationTest(unittest.TestCase): src = "en" tgt = "de" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", "Tom asked his teacher for advice.", "That's how I would do it.", "Tom really admired Mary's courage.", "Turn around and close your eyes.", ] expected_text = [ "Ich bin ein kleiner Frosch.", "Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.", "Tom bat seinen Lehrer um Rat.", "So würde ich das machen.", "Tom bewunderte Marias Mut wirklich.", "Drehen Sie sich um und schließen Sie die Augen.", ] # ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device) c = model.config self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]]) self.assertEqual(c.max_length, 512) self.assertEqual(c.decoder_start_token_id, c.pad_token_id) if torch_device == "cuda": return model.half() else: return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to( torch_device ) self.assertEqual(self.model.device, model_inputs.input_ids.device) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128 ) generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @require_sentencepiece @require_tokenizers class TestMarian_EN_DE_More(MarianIntegrationTest): @slow def test_forward(self): src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."] expected_ids = [38, 121, 14, 697, 38848, 0] model_inputs = self.tokenizer(src, text_target=tgt, return_tensors="pt").to(torch_device) self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist()) desired_keys = { "input_ids", "attention_mask", "labels", } self.assertSetEqual(desired_keys, set(model_inputs.keys())) model_inputs["decoder_input_ids"] = shift_tokens_right( model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id ) model_inputs["return_dict"] = True model_inputs["use_cache"] = False with torch.no_grad(): outputs = self.model(**model_inputs) max_indices = outputs.logits.argmax(-1) self.tokenizer.batch_decode(max_indices) def test_unk_support(self): t = self.tokenizer ids = t(["||"],
return_tensors="pt").to(torch_device).input_ids[0].tolist() expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id] self.assertEqual(expected, ids) def test_pad_not_split(self): input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist() expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad self.assertListEqual(expected_w_pad, input_ids_w_pad) @slow def test_batch_generation_en_de(self): self._assert_generated_batch_equal_expected() def test_auto_config(self): config = AutoConfig.from_pretrained(self.model_name) self.assertIsInstance(config, MarianConfig) @require_sentencepiece @require_tokenizers class TestMarian_EN_FR(MarianIntegrationTest): src = "en" tgt = "fr" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", ] expected_text = [ "Je suis une petite grenouille.", "Maintenant, je peux oublier les 100 mots d'allemand que je connais.", ] @slow def test_batch_generation_en_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_FR_EN(MarianIntegrationTest): src = "fr" tgt = "en" src_text = [ "Donnez moi le micro.", "Tom et Mary étaient assis à une table.", # Accents ] expected_text = [ "Give me the microphone.", "Tom and Mary were sitting at a table.", ] @slow def test_batch_generation_fr_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_RU_FR(MarianIntegrationTest): src = "ru" tgt = "fr" src_text = ["Он показал мне рукопись своей новой пьесы."] expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."] @slow def test_batch_generation_ru_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_MT_EN(MarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks without adjust_logits_generation overwritten""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_zh(MarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_eng_zho(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_ROMANCE(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow def test_pipeline(self): device = 0 if torch_device == "cuda" else -1 pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=device) output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output]) @require_sentencepiece @require_tokenizers class TestMarian_FI_EN_V2(MarianIntegrationTest): src = "fi" tgt = "en" src_text = [ "minä tykkään kirjojen lukemisesta", "Pidän jalkapallon katsomisesta", ] expected_text = ["I like to read books", "I like watching football"] @classmethod def setUpClass(cls) -> None: cls.model_name = "hf-internal-testing/test-opus-tatoeba-fi-en-v2" return cls @slow def test_batch_generation_fi_en(self): self._assert_generated_batch_equal_expected() @require_torch class TestConversionUtils(unittest.TestCase): def test_renaming_multilingual(self): old_names = [ "opus-mt-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "opus-mt-cmn+cn-fi", # no group "opus-mt-en-de", # standard name "opus-mt-en-de", # standard name ] expected = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"] self.assertListEqual(expected, [convert_opus_name_to_hf_name(x) for x in old_names]) def test_undoing_renaming(self): hf_names = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"] converted_opus_names = [convert_hf_name_to_opus_name(x) for x in hf_names] expected_opus_names = [ "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "cmn+cn-fi", "en-de", # standard name "en-de", ] self.assertListEqual(expected_opus_names, converted_opus_names) class MarianStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = 
use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MarianConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MarianDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MarianDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to 
next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MarianStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MarianDecoder, MarianForCausalLM) if is_torch_available() else () all_generative_model_classes = (MarianForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MarianStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
0
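A minimal sketch of the label-shifting step that test_forward above drives through shift_tokens_right, assuming the standard Marian/Bart convention (prepend decoder_start_token_id, shift the labels one position to the right, replace the -100 loss mask with the pad id); the function name and example ids are illustrative, not the library implementation.

import torch

def shift_tokens_right_sketch(labels: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Shift labels one position to the right so the decoder predicts token t from tokens < t.
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # -100 is only a loss mask; it cannot be fed to the embedding layer, so use the pad id instead.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

# e.g. labels [[38, 121, 14, 0]] with decoder_start_token_id == pad_token_id (as the Marian
# integration test above asserts) become [[pad, 38, 121, 14]].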
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/marian/test_tokenization_marian.py
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model") mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"} zh_code = ">>zh<<" ORG_NAME = "Helsinki-NLP/" if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = MarianTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) save_dir = Path(self.tmpdirname) save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"]) save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"]) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"]) copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"]) tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return ( "This is a test", "This is a test", ) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "</s>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "</s>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "<pad>") self.assertEqual(len(vocab_keys), 9) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 9) def test_tokenizer_equivalence_en_de(self): en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de") batch = en_de_tokenizer(["I am a small frog"], return_tensors=None) self.assertIsInstance(batch, BatchEncoding) expected = [38, 121, 14, 697, 38848, 0] self.assertListEqual(expected, batch.input_ids[0]) save_dir = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(save_dir) contents = [x.name for x in Path(save_dir).glob("*")] self.assertIn("source.spm", contents) MarianTokenizer.from_pretrained(save_dir) def 
test_outputs_not_longer_than_maxlen(self): tok = self.get_tokenizer() batch = tok( ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 512)) def test_outputs_can_be_shorter(self): tok = self.get_tokenizer() batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch_smaller, BatchEncoding) self.assertEqual(batch_smaller.input_ids.shape, (2, 10)) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( 
expected_encoding=expected_encoding, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True}, ) def test_tokenizer_integration_seperate_vocabs(self): tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs") source_text = "Tämä on testi" target_text = "This is a test" expected_src_ids = [76, 7, 2047, 2] expected_target_ids = [69, 12, 11, 940, 2] src_ids = tokenizer(source_text).input_ids self.assertListEqual(src_ids, expected_src_ids) target_ids = tokenizer(text_target=target_text).input_ids self.assertListEqual(target_ids, expected_target_ids) decoded = tokenizer.decode(target_ids, skip_special_tokens=True) self.assertEqual(decoded, target_text)
0
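A usage sketch of the two-vocabulary encode/decode round trip exercised by test_tokenizer_integration_seperate_vocabs above; the checkpoint name, texts and expected ids are the ones used in that test.

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
src_ids = tok("Tämä on testi").input_ids                # source SentencePiece vocab -> [76, 7, 2047, 2]
tgt_ids = tok(text_target="This is a test").input_ids   # target SentencePiece vocab -> [69, 12, 11, 940, 2]
print(tok.decode(tgt_ids, skip_special_tokens=True))    # "This is a test"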
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/marian/test_modeling_tf_marian.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import warnings from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel @require_tf class TFMarianModelTester: config_cls = MarianConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFMarianModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] 
attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFMarianModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else () all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else () pipeline_model_mapping = ( { "conversational": TFMarianMTModel, "feature-extraction": TFMarianModel, "summarization": TFMarianMTModel, "text2text-generation": TFMarianMTModel, "translation": TFMarianMTModel, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def setUp(self): self.model_tester = TFMarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tf class AbstractMarianIntegrationTest(unittest.TestCase): maxDiff = 1000 # show more chars for failing integration tests @classmethod def setUpClass(cls) -> None: cls.model_name = 
f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self) -> MarianTokenizer: return AutoTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): warnings.simplefilter("error") model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) assert isinstance(model, TFMarianMTModel) c = model.config self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]]) self.assertEqual(c.max_length, 512) self.assertEqual(c.decoder_start_token_id, c.pad_token_id) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128 ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True) return generated_words @require_sentencepiece @require_tokenizers @require_tf class TestMarian_MT_EN(AbstractMarianIntegrationTest): """Cover low resource/high perplexity setting. This breaks if pad_token_id logits not set to LARGE_NEGATIVE.""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @unittest.skip("Skipping until #12647 is resolved.") @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers @require_tf class TestMarian_en_zh(AbstractMarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @unittest.skip("Skipping until #12647 is resolved.") @slow def test_batch_generation_en_zh(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers @require_tf class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @unittest.skip("Skipping until #12647 is resolved.") @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @unittest.skip("Skipping until #12647 is resolved.") @slow def test_pipeline(self): pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf") output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
0
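The decoder attention mask built by prepare_marian_inputs_dict above always unmasks position 0: Marian reuses the pad id as decoder_start_token_id, so a plain not-equal-to-pad comparison would wrongly mask the start token. A standalone sketch of that step, with made-up ids for illustration:

import tensorflow as tf

pad_token_id = 1
decoder_input_ids = tf.constant([[1, 5, 7, 1, 1]])  # starts with pad acting as decoder_start_token_id

decoder_attention_mask = tf.concat(
    [
        tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),                       # keep position 0
        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], pad_token_id), tf.int8),  # mask real padding
    ],
    axis=-1,
)
print(decoder_attention_mask.numpy())  # [[1 1 1 0 0]]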
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/deformable_detr/test_modeling_deformable_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Deformable DETR model. """ import inspect import math import unittest from typing import Dict, List, Tuple from transformers import DeformableDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.file_utils import cached_property from transformers.testing_utils import ( require_timm, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DeformableDetrForObjectDetection, DeformableDetrModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DeformableDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, image_size=196, n_targets=8, num_labels=91, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, 
self.image_size, self.image_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return DeformableDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, use_timm_backbone=False, backbone_config=resnet_config, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeformableDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = 
torch.ones( self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DeformableDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False) def test_config(self): # we don't test common_properties and arguments_init as these don't apply for Deformable DETR self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) def test_deformable_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Deformable DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 8 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DeformableDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, 
self.model_tester.num_queries, self.model_tester.num_queries], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the second output since last_hidden_state is the second item output = outputs[1] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in 
arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DeformableDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if param.requires_grad: if ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DeformableDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None def test_inference_object_detection_head(self): model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] ).to(torch_device) expected_boxes = torch.tensor( [[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7999, 0.7894, 0.6331, 0.4720, 0.4382]).to(torch_device) expected_labels = [17, 17, 75, 75, 63] expected_slice_boxes = torch.tensor([16.5028, 52.8390, 318.2544, 470.7841]).to(torch_device) 
self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) def test_inference_object_detection_head_with_box_refine_two_stage(self): model = DeformableDetrForObjectDetection.from_pretrained( "SenseTime/deformable-detr-with-box-refine-two-stage" ).to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] ).to(torch_device) expected_boxes = torch.tensor( [[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) @require_torch_gpu def test_inference_object_detection_head_equivalence_cpu_gpu(self): image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] pixel_mask = encoding["pixel_mask"] # 1. run model on CPU model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") with torch.no_grad(): cpu_outputs = model(pixel_values, pixel_mask) # 2. run model on GPU model.to("cuda") with torch.no_grad(): gpu_outputs = model(pixel_values.to("cuda"), pixel_mask.to("cuda")) # 3. assert equivalence for key in cpu_outputs.keys(): assert torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-4) expected_logits = torch.tensor( [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] ) assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
0
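A worked sketch of the encoder_seq_length formula used by DeformableDetrModelTester above: with multi-scale deformable attention, the encoder input is the concatenation of the flattened feature maps at strides 8, 16, 32 and 64, so the sequence length is the sum of the squared, ceil-divided grid sizes.

import math

def deformable_detr_encoder_seq_length(image_size: int, strides=(8, 16, 32, 64)) -> int:
    # Each feature level contributes a ceil(size/stride) x ceil(size/stride) grid of tokens.
    return sum(math.ceil(image_size / s) ** 2 for s in strides)

print(deformable_detr_encoder_seq_length(196))  # 25**2 + 13**2 + 7**2 + 4**2 = 859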
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class DeformableDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to DeformableDetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DeformableDetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} # encode them image_processing = DeformableDetrImageProcessor() encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) 
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them image_processing = DeformableDetrImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
0
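For context on the shape assertions in the DeformableDetr image-processing tests above, the aspect-ratio-preserving resize that get_expected_values reproduces boils down to a few lines of plain Python. The snippet below is an illustrative sketch rather than the processor's implementation: the function name is hypothetical, and the clamp against size["longest_edge"] is omitted, just as it is in the tester.

def expected_resized_size(height, width, shortest_edge=18):
    # Scale the image so its shorter side equals `shortest_edge`, truncating the
    # longer side with int(...) exactly as the tester does.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    return shortest_edge, int(shortest_edge * width / height)

# With the checkpoint default shortest_edge=800, the 480x640 COCO fixture image maps to
# (800, 1066), which is why the slow tests assert pixel_values of shape [1, 3, 800, 1066].
assert expected_resized_size(480, 640, shortest_edge=800) == (800, 1066)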
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/musicgen/test_modeling_musicgen.py
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Musicgen model. """ import copy import inspect import math import unittest import numpy as np from transformers import ( EncodecConfig, MusicgenConfig, MusicgenDecoderConfig, MusicgenProcessor, PretrainedConfig, T5Config, ) from transformers.testing_utils import is_torch_available, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, set_seed, ) from transformers.generation import ( GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, InfNanRemoveLogitsProcessor, LogitsProcessorList, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, ) def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def prepare_musicgen_decoder_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])[:, 0, :] attention_mask = attention_mask.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "head_mask": head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MusicgenDecoderTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = 
seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) config = self.get_config() inputs_dict = prepare_musicgen_decoder_inputs_dict( config, input_ids, encoder_hidden_states=encoder_hidden_states ) return config, inputs_dict def get_config(self): config = MusicgenDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, d_ff=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, ) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MusicgenModel, MusicgenForCausalLM) if is_torch_available() else () greedy_sample_model_classes = ( (MusicgenForCausalLM,) if is_torch_available() else () ) # we don't want to run all the generation tests, only a specific subset pipeline_model_mapping = {} test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = MusicgenDecoderTester(self) self.config_tester = ConfigTester(self, config_class=MusicgenDecoderConfig, hidden_size=16) def test_config(self): self.config_tester.run_common_tests() # override since we have to compute the input embeddings over codebooks def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] embed_tokens = model.get_input_embeddings() input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1]) inputs["inputs_embeds"] = sum( [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)] ) with torch.no_grad(): model(**inputs)[0] # override since we have embeddings / LM heads over multiple codebooks def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first_embed = model.get_input_embeddings()[0] self.assertIsInstance(first_embed, torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) # skip as this model doesn't support all arguments tested def test_model_outputs_equivalence(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def 
test_tie_model_weights(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tied_weights_keys(self): pass def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict["input_ids"] # take max batch_size sequence_length = input_ids.shape[-1] input_ids = input_ids[: batch_size * config.num_codebooks, :] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, } logits_processor = LogitsProcessorList() return process_kwargs, logits_processor # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform # additional post-processing in the former def test_greedy_generate_dict_outputs(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertNotIn(config.pad_token_id, output_generate) # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform # additional post-processing in the former def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.greedy_sample_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform # additional post-processing in the former def test_sample_generate(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, 
attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertIsInstance(output_sample, torch.Tensor) self.assertIsInstance(output_generate, torch.Tensor) # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform # additional post-processing in the former def test_sample_generate_dict_output(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) def prepare_musicgen_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.reshape( -1, config.decoder.num_codebooks, decoder_input_ids.shape[-1] )[:, 0, :] decoder_attention_mask = decoder_attention_mask.ne(config.decoder.pad_token_id) if head_mask is None: head_mask = torch.ones( config.text_encoder.num_hidden_layers, config.text_encoder.num_attention_heads, device=torch_device ) if decoder_head_mask is None: decoder_head_mask = torch.ones( config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device ) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones( config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MusicgenTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, num_filters=4, codebook_size=128, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act 
self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks self.num_filters = num_filters self.codebook_size = codebook_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_musicgen_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids) return config, inputs_dict def get_config(self): text_encoder_config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.intermediate_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, ) audio_encoder_config = EncodecConfig( hidden_size=self.vocab_size, compress=1, num_filters=self.num_filters, codebook_size=self.codebook_size, codebook_dim=self.vocab_size, ) decoder_config = MusicgenDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, ) config = MusicgenConfig.from_sub_models_config(text_encoder_config, audio_encoder_config, decoder_config) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else () greedy_sample_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {} test_pruning = False # training is not supported yet for MusicGen test_headmasking = False test_resize_embeddings = False # not to test torchscript as the model tester doesn't prepare `input_values` and `padding_mask` # (and `torchscript` hates `None` values). 
test_torchscript = False def setUp(self): self.model_tester = MusicgenTester(self) def _check_output_with_attentions(self, outputs, config, input_ids, decoder_input_ids): text_encoder_config = config.text_encoder decoder_config = config.decoder encoder_attentions = outputs["encoder_attentions"] self.assertEqual(len(encoder_attentions), text_encoder_config.num_hidden_layers) self.assertEqual( encoder_attentions[0].shape[-3:], (text_encoder_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) decoder_attentions = outputs["decoder_attentions"] num_decoder_layers = decoder_config.num_hidden_layers self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]), ) def check_musicgen_model_output_attentions( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) def check_musicgen_model_output_attentions_from_config( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): # Similar to `check_musicgen_model_output_attentions`, but with `output_attentions` triggered from the # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded # from the inner models' configurations. 
config.output_attentions = True # model config -> won't work model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self.assertTrue( all(key not in outputs for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"]) ) config.text_encoder.output_attentions = True # inner model config -> will work config.audio_encoder.output_attentions = True config.decoder.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) # override since changing `output_attentions` from the top-level model config won't work def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.check_musicgen_model_output_attentions(model_class, config, **inputs_dict) self.check_musicgen_model_output_attentions_from_config(model_class, config, **inputs_dict) # override since we have a specific forward signature for musicgen def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "input_values", "padding_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # override since changing `gradient_checkpointing` from the top-level model config won't work def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.text_encoder.gradient_checkpointing = True config.audio_encoder.gradient_checkpointing = True config.decoder.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tie_model_weights(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tied_model_weights_key_ignore(self): pass # skip as this model has multiple inputs embeds and lm heads that should not be tied def test_tied_weights_keys(self): pass # override since changing `output_hidden_states` / `output_attentions` from the top-level model config won't work def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True config.text_encoder.output_attentions = 
True config.decoder.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) # override since changing `output_hidden_states` from the top-level model config won't work def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override since the conv layers and lstm's in encodec are exceptions def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv"] ignore_init = ["lstm"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif not any(x in name for x in ignore_init): self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # override since we have embeddings / LM heads over multiple codebooks def test_model_common_attributes(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict["input_ids"] # take max batch_size sequence_length = input_ids.shape[-1] input_ids = input_ids[:batch_size, :] attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) # generate max 3 tokens decoder_input_ids = inputs_dict["decoder_input_ids"] max_length = decoder_input_ids.shape[-1] + 3 decoder_input_ids = decoder_input_ids[: batch_size * config.decoder.num_codebooks, :] return config, input_ids, attention_mask, decoder_input_ids, max_length # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen (input / outputs are # different modalities -> different shapes) def _greedy_generate( self, model, input_ids, attention_mask, decoder_input_ids, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( decoder_input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, encoder_outputs=encoder_outputs, **model_kwargs, ) return output_greedy, output_generate # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen (input / outputs are # different modalities -> different shapes) def _sample_generate( self, model, input_ids, attention_mask, decoder_input_ids, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, 
**process_kwargs, **model_kwargs, ) torch.manual_seed(0) encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( decoder_input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, encoder_outputs=encoder_outputs, **model_kwargs, ) return output_sample, output_generate @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, } logits_processor = LogitsProcessorList() return process_kwargs, logits_processor def test_greedy_generate_dict_outputs(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) self.assertNotIn(config.pad_token_id, output_generate) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.greedy_sample_model_classes: # enable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) def test_sample_generate(self): for model_class in self.greedy_sample_model_classes: config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, 
decoder_input_ids=decoder_input_ids, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertIsInstance(output_sample, torch.Tensor) self.assertIsInstance(output_generate, torch.Tensor) def test_sample_generate_dict_output(self): for model_class in self.greedy_sample_model_classes: # disable cache config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) def test_generate_without_input_ids(self): config, _, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.greedy_sample_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.greedy_sample_model_classes: model = model_class(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(**input_dict, max_new_tokens=10) model.generate(**input_dict, do_sample=True, max_new_tokens=10) def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): """Produces a series of 'bip bip' sounds at a given frequency.""" timesteps = np.arange(int(duration * sample_rate)) / sample_rate wav = np.cos(2 * math.pi * 440 * timesteps) time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration) envelope = time_period >= 0.5 return wav * envelope def place_dict_on_device(dict_to_place, device): for key in dict_to_place: if dict_to_place[key] is not None and isinstance(dict_to_place[key], torch.Tensor): dict_to_place[key] = dict_to_place[key].to(device) return dict_to_place @require_torch class MusicgenIntegrationTests(unittest.TestCase): @cached_property def model(self): return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to(torch_device) @cached_property def processor(self): return MusicgenProcessor.from_pretrained("facebook/musicgen-small") @slow def test_logits_text_prompt(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # 
prepare the decoder inputs pad_token_id = model.generation_config.pad_token_id decoder_input_ids = ( torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) * pad_token_id ) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor( [ -0.9708, -3.0149, -4.6415, -1.4754, -0.2786, -2.3523, -2.6049, -6.7467, -1.0206, -3.2984, -3.3968, -1.5108, -1.5786, -3.1493, -1.1503, -0.0545, ] ) # fmt: on self.assertTrue(logits.shape == (*decoder_input_ids.shape, model.decoder.config.vocab_size)) self.assertTrue(torch.allclose(logits[0, 0, :16].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_logits_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") # prepare the text encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # prepare the audio encoder inputs input_values = inputs.input_values.to(torch_device) padding_mask = inputs.padding_mask.to(torch_device) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, input_values=input_values, padding_mask=padding_mask, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.1841, -2.9324, -0.7898, 0.1857, 0.4971, -2.8685, -1.6525, -1.6541, 2.7757, -2.5942, -3.0959, -1.0120, -1.0147, -0.4605, -0.8885, 0.6820, ] ) # fmt: on self.assertTrue(logits.shape == (8, 50, 2048)) self.assertTrue(torch.allclose(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = model.get_unconditional_inputs(num_samples=1) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=5) # fmt: off EXPECTED_VALUES = torch.tensor( [ 0.0056, 0.0064, 0.0063, 0.0054, 0.0042, 0.0033, 0.0024, 0.0015, 0.0015, 0.0010, 0.0004, -0.0012, -0.0036, -0.0055, -0.0067, -0.0071, ] ) # fmt: on self.assertTrue(output_values.shape == (1, 1, 3200)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_unconditional_sampling(self): model = self.model # for stochastic sampling we can generate multiple outputs unconditional_inputs = model.get_unconditional_inputs(num_samples=2) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) set_seed(0) output_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=10) # fmt: off EXPECTED_VALUES = torch.tensor( [ 0.0765, 0.0758, 0.0749, 0.0759, 0.0759, 0.0771, 0.0775, 0.0760, 0.0762, 0.0765, 0.0767, 0.0760, 0.0738, 0.0714, 0.0713, 0.0730, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_greedy(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) 
output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -1.1998e-04, -2.2302e-04, 4.6296e-04, 1.0524e-03, 2.4827e-04, -4.0288e-05, -1.2468e-04, 4.9846e-05, 7.1485e-04, 4.4197e-04, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_greedy_with_classifier_free_guidance(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=3, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ 0.0283, 0.0246, 0.0650, 0.0640, 0.0599, 0.0711, 0.0420, 0.0112, 0.0511, 0.0746, 0.1363, 0.1213, 0.0185, -0.0578, -0.0908, 0.0443, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_prompt_sampling(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) set_seed(0) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=True, guidance_scale=None, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0047, -0.0094, -0.0028, -0.0018, -0.0057, -0.0007, -0.0104, -0.0211, -0.0097, -0.0150, -0.0066, -0.0004, -0.0201, -0.0325, -0.0326, -0.0098, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") inputs = place_dict_on_device(inputs, device=torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=None, max_new_tokens=10) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0036, -0.0130, -0.0261, -0.0384, -0.0557, -0.0718, -0.0680, -0.0632, -0.0529, -0.0403, -0.0289, -0.0198, -0.0136, -0.0101, -0.0095, -0.0040, ] ) # fmt: on self.assertTrue( output_values.shape == (2, 1, 36480) ) # input values take shape 32000 and we generate from there self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, atol=1e-4))
0
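The Musicgen tests above repeatedly stack the codebooks along the batch dimension, so the decoder receives input_ids of shape (batch_size * num_codebooks, seq_length). The snippet below is a minimal sketch of how prepare_musicgen_decoder_inputs_dict derives the attention mask from that layout, using the tester defaults (batch_size=2, num_codebooks=4, seq_length=7, pad_token_id=99); the variable names are illustrative.

import torch

batch_size, num_codebooks, seq_length, pad_token_id = 2, 4, 7, 99

# Codebooks are interleaved along the batch dimension, giving
# batch_size * num_codebooks rows of token ids.
input_ids = torch.randint(0, pad_token_id, (batch_size * num_codebooks, seq_length))

# The attention mask is computed from the first codebook of each sample only,
# mirroring prepare_musicgen_decoder_inputs_dict above.
attention_mask = input_ids.reshape(-1, num_codebooks, seq_length)[:, 0, :].ne(pad_token_id)

print(input_ids.shape, attention_mask.shape)  # torch.Size([8, 7]) torch.Size([2, 7])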
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/musicgen/test_processing_musicgen.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the MusicGen processor.""" import random import shutil import tempfile import unittest import numpy as np from transformers import T5Tokenizer, T5TokenizerFast from transformers.testing_utils import require_sentencepiece, require_torch from transformers.utils.import_utils import is_speech_available, is_torch_available if is_torch_available(): pass if is_speech_available(): from transformers import EncodecFeatureExtractor, MusicgenProcessor global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_sentencepiece class MusicgenProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "facebook/musicgen-small" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return EncodecFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = MusicgenProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = MusicgenProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = MusicgenProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract 
= feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(sequences=predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_decode_audio(self): feature_extractor = self.get_feature_extractor(padding_side="left") tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = [floats_list((1, x))[0] for x in range(5, 20, 5)] padding_mask = processor(raw_speech).padding_mask generated_speech = np.asarray(floats_list((3, 20)))[:, None, :] decoded_audios = processor.batch_decode(generated_speech, padding_mask=padding_mask) self.assertIsInstance(decoded_audios, list) for audio in decoded_audios: self.assertIsInstance(audio, np.ndarray) self.assertTrue(decoded_audios[0].shape == (1, 10)) self.assertTrue(decoded_audios[1].shape == (1, 15)) self.assertTrue(decoded_audios[2].shape == (1, 20))
0
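test_decode_audio above asserts decoded shapes of (1, 10), (1, 15) and (1, 20) for raw inputs of length 5, 10 and 15. The arithmetic behind those numbers is restated below as a hedged sketch: with left padding, each input is padded up to the longest length (15), and batch_decode strips the same number of samples from each generated 20-sample waveform. This only reconstructs the expected values; it is not the processor's implementation.

input_lengths = [5, 10, 15]   # lengths of the raw_speech examples fed to the processor
generated_length = 20         # length of each generated waveform in the test
longest = max(input_lengths)

# Samples stripped per example == amount of left padding added to that example.
expected_decoded_lengths = [generated_length - (longest - n) for n in input_lengths]
print(expected_decoded_lengths)  # [10, 15, 20]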
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_modeling_tf_whisper.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Whisper model. """ from __future__ import annotations import inspect import tempfile import traceback import unittest import numpy as np from transformers import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor from transformers.testing_utils import is_tf_available, require_tf, require_tokenizers, run_test_in_subprocess, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_tf_available(): import tensorflow as tf from transformers import TFWhisperForConditionalGeneration, TFWhisperModel, set_seed from transformers.models.whisper.modeling_tf_whisper import TFWhisperDecoder, TFWhisperEncoder def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = tf.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFWhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=60, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins 
self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = TFWhisperModel(config=config) input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFWhisperModel(config=config).get_decoder() # take a slice so we're shorter than the seqeuence length and can append later input_ids = inputs_dict["decoder_input_ids"][:, :-10] attention_mask = inputs_dict["decoder_attention_mask"][:, :-10] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_token = ids_tensor((self.batch_size, 3), config.vocab_size) next_tokens = tf.where(next_token <= 2, 2, next_token) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] 
# select random slice random_slice_idx = np.random.randint(0, output_from_past.shape[-1]) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(np.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TFWhisperModel(config=config) outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TFWhisperEncoder.from_pretrained(tmpdirname) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TFWhisperDecoder.from_pretrained(tmpdirname) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max() < 1e-3) @require_tf class TFWhisperModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFWhisperModel, TFWhisperForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFWhisperForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFWhisperModel} if is_tf_available() else {} is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_features" def setUp(self): self.model_tester = TFWhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) model.build() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] # generate max 3 tokens max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length # not 
implemented currently def test_inputs_embeds(self): pass @unittest.skip("Training is not yet supported") def test_training(self): pass def test_generate_with_head_masking(self): pass @unittest.skip("fp16 is not yet supported for TF models") def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = TFWhisperForConditionalGeneration(config) model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["decoder_position_ids", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = 
getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", encoder_key_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) 
+ tf.convert_to_tensor( [model._get_decoder_start_token_id()] ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_features with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_features, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with 
self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_generate_with_prompt_ids_and_task_and_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.arange(5) language = "<|de|>" task = "translate" lang_id = 6 task_id = 7 model.generation_config.__setattr__("lang_to_id", {language: lang_id}) model.generation_config.__setattr__("task_to_id", {task: task_id}) output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, lang_id, task_id, ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def test_generate_with_prompt_ids_and_forced_decoder_ids(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.asarray(range(5)) forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] output = model.generate( input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids ) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, *[token for _rank, token in forced_decoder_ids], ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def _load_datasamples(num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _test_large_logits_librispeech(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="tf" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ 
tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on unittest.TestCase().assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation_multilingual(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." 
unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_batched_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids_1 = model.generate(input_features[0:2], max_length=20) generated_ids_2 = model.generate(input_features[2:4], max_length=20) generated_ids = np.concatenate([generated_ids_1, generated_ids_2]) # fmt: off EXPECTED_IDS = [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] # fmt: on unittest.TestCase().assertEqual(generated_ids.tolist(), EXPECTED_IDS) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) unittest.TestCase().assertListEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() @require_tf @require_tokenizers class TFWhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): return _load_datasamples(num_samples) @slow def test_tiny_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = tf.convert_to_tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) self.assertTrue(np.allclose(head_logits[0, 0, :30], EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-small.en") input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): run_test_in_subprocess(test_case=self, target_func=_test_large_logits_librispeech, inputs=None) @slow def test_tiny_en_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.config.decoder_start_token_id = 
50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, num_beams=5, max_length=20) generated_ids_xla = xla_generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) transcript_xla = processor.tokenizer.decode(generated_ids_xla[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) self.assertEqual(transcript_xla, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation, inputs=None) @slow def test_large_generation_multilingual(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation_multilingual, inputs=None) @slow def test_large_batched_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_batched_generation, inputs=None) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, max_length=20) generated_ids_xla = xla_generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) self.assertTrue(np.allclose(generated_ids_xla, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) transcript_xla = processor.batch_decode(generated_ids_xla, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) self.assertListEqual(transcript_xla, EXPECTED_TRANSCRIPT)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_processor_whisper.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers import WhisperTokenizer, is_speech_available from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio from .test_feature_extraction_whisper import floats_list if is_speech_available(): from transformers import WhisperFeatureExtractor, WhisperProcessor TRANSCRIBE = 50358 NOTIMESTAMPS = 50362 @require_torch @require_torchaudio @require_sentencepiece class WhisperProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "openai/whisper-small.en" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return WhisperTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return WhisperFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = WhisperProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = WhisperProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() 
tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_get_decoder_prompt_ids(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) forced_decoder_ids = processor.get_decoder_prompt_ids(task="transcribe", no_timestamps=True) self.assertIsInstance(forced_decoder_ids, list) for ids in forced_decoder_ids: self.assertIsInstance(ids, (list, tuple)) expected_ids = [TRANSCRIBE, NOTIMESTAMPS] self.assertListEqual([ids[-1] for ids in forced_decoder_ids], expected_ids) def test_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("Mr. Quilter") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 1770, 13, 2264, 346, 353]) self.assertEqual(decoded_prompt, "<|startofprev|> Mr. Quilter") def test_empty_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 220]) self.assertEqual(decoded_prompt, "<|startofprev|> ") def test_get_prompt_ids_with_special_tokens(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) def _test_prompt_error_raised_helper(prompt, special_token): with pytest.raises(ValueError) as excinfo: processor.get_prompt_ids(prompt) expected = f"Encountered text in the prompt corresponding to disallowed special token: {special_token}." self.assertEqual(expected, str(excinfo.value)) _test_prompt_error_raised_helper("<|startofprev|> test", "<|startofprev|>") _test_prompt_error_raised_helper("test <|notimestamps|>", "<|notimestamps|>") _test_prompt_error_raised_helper("test <|zh|> test <|transcribe|>", "<|zh|>")
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_tokenization_whisper.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.models.whisper import WhisperTokenizer, WhisperTokenizerFast from transformers.models.whisper.tokenization_whisper import _combine_tokens_into_words, _find_longest_common_sequence from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin ES_CODE = 50262 EN_CODE = 50259 END_OF_TRANSCRIPT = 50257 START_OF_TRANSCRIPT = 50258 TRANSLATE = 50358 TRANSCRIBE = 50359 NOTIMESTAMPS = 50363 class WhisperTokenizerTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = WhisperTokenizer rust_tokenizer_class = WhisperTokenizerFast test_rust_tokenizer = True test_sentencepiece = False test_seq2seq = False def setUp(self): super().setUp() tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") tokenizer.pad_token_id = 50256 tokenizer.pad_token = "<|endoftext|>" tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "Where" token_id = 14436 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "!") self.assertEqual(vocab_keys[1], '"') self.assertEqual(vocab_keys[-1], "<|notimestamps|>") self.assertEqual(len(vocab_keys), 50364) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 50258) def test_full_tokenizer(self): tokenizer = WhisperTokenizer.from_pretrained(self.tmpdirname) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["This", "Ġis", "Ġa", "Ġ", "test"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [5723, 307, 257, 220, 31636], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, # fmt: off ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], # fmt: on ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 220, 11176, 307, 16720, 526, 13]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, # fmt: off ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], # fmt: on ) def test_tokenizer_slow_store_full_signature(self): pass def test_tokenizer_fast_store_full_signature(self): pass def test_special_tokens_initialization(self): # Whisper relies on specific additional special tokens, so we skip this # general test. 
In particular, this test loads fast tokenizer from slow # tokenizer, and the conversion uses prefix_tokens, where we reference # additional special tokens by specific indices, hence overriding the # list with less tokens leads to out of index error pass @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[50257, 50362, 41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13, 50256], [50257, 50362, 13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13, 50256], [50257, 50362, 464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13, 50256]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="openai/whisper-tiny.en", padding=False ) def test_output_offsets(self): tokenizer = self.get_tokenizer() previous_sequence = [51492, 406, 3163, 1953, 466, 13, 51612, 51612] self.assertEqual( tokenizer.decode(previous_sequence, output_offsets=True), { "text": " not worth thinking about.", "offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}], }, ) # Merge when the previous sequence is a suffix of the next sequence # fmt: off next_sequences_1 = [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] # fmt: on self.assertEqual( tokenizer.decode(next_sequences_1, output_offsets=True), { "text": ( " of spectators, retrievality is not worth thinking about. His instant panic was followed by a" " small, sharp blow high on his chest.<|endoftext|>" ), "offsets": [ {"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (5.0, 9.4), }, ], }, ) def test_find_longest_common_subsequence(self): previous_sequence = [1, 2, 3] next_sequence = [2, 3, 4, 5] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5]) # Now previous is larger than next. 
# We merge what we can and remove the extra right side of the left sequence previous_sequence = [1, 2, 3, 4, 5, 6, 7] next_sequence = [2, 3, 4, 5] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5]) # Nothing in common previous_sequence = [1, 2, 3] next_sequence = [4, 5, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6]) # Some errors in the overlap. # We take from previous on the left, from the next on the right of the overlap previous_sequence = [1, 2, 3, 4, 99] next_sequence = [2, 98, 4, 5, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6]) # We take from previous on the left, from the next on the right of the overlap previous_sequence = [1, 2, 99, 4, 5] next_sequence = [2, 3, 4, 98, 6] merge = _find_longest_common_sequence([previous_sequence, next_sequence]) self.assertEqual(merge, [1, 2, 99, 4, 98, 6]) # This works on 3 sequences seq1 = [1, 2, 3] seq2 = [2, 3, 4] seq3 = [3, 4, 5] merge = _find_longest_common_sequence([seq1, seq2, seq3]) self.assertEqual(merge, [1, 2, 3, 4, 5]) # This works on 3 sequences with errors seq1 = [1, 2, 3, 98, 5] seq2 = [2, 99, 4, 5, 6, 7] seq3 = [4, 97, 6, 7, 8] merge = _find_longest_common_sequence([seq1, seq2, seq3]) self.assertEqual(merge, [1, 2, 3, 4, 5, 6, 7, 8]) def test_skip_special_tokens_skips_prompt_ids(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # fmt: off encoded_input = [ 50361, 2221, 13, 2326, 388, 391, 50258, 50259, 50359, 50363, 1282, 264, 2674, 9156, 295, 1523, 11, 2221, 13, 2326, 388, 391, 13657, 365, 2681, 21296, 17711, 13, 50257, ] # fmt: on expected_with_special_tokens = "<|startofprev|> Mr. Quilter<|startoftranscript|><|en|><|transcribe|><|notimestamps|> On the general principles of art, Mr. Quilter writes with equal lucidity.<|endoftext|>" expected_without_special_tokens = " On the general principles of art, Mr. Quilter writes with equal lucidity." 
self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) self.assertEqual(tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens) self.assertEqual(rust_tokenizer.decode(encoded_input, skip_special_tokens=False), expected_with_special_tokens) self.assertEqual( rust_tokenizer.decode(encoded_input, skip_special_tokens=True), expected_without_special_tokens ) def test_skip_special_tokens_with_timestamps(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # fmt: off encoded_input = [ 50258, 50363, 50364, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 293, 50676, 50676, 393, 4411, 294, 309, 457, 707, 295, 33301, 286, 392, 6628, 13, 50836, 50257, ] # fmt: on expected_with_special_tokens = "<|startoftranscript|><|notimestamps|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|><|endoftext|>" expected_without_special_tokens = "<|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and<|6.24|><|6.24|> can discover in it but little of rocky Ithaca.<|9.44|>" self.assertEqual( tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=False), expected_with_special_tokens, ) self.assertEqual( tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=True), expected_without_special_tokens, ) self.assertEqual( rust_tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=False), expected_with_special_tokens, ) self.assertEqual( rust_tokenizer.decode(encoded_input, decode_with_timestamps=True, skip_special_tokens=True), expected_without_special_tokens, ) def test_fast_tokenizer_get_prompt_ids(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() prompt = "This is test prompt text." tokenizer_prompt_ids = tokenizer.get_prompt_ids(prompt) fast_tokenizer_prompt_ids = rust_tokenizer.get_prompt_ids(prompt) self.assertListEqual(tokenizer_prompt_ids.tolist(), fast_tokenizer_prompt_ids.tolist()) def test_combine_tokens_into_words(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # 'whatever "whatever" said someone, clever!?' 
encoded_input = [1363, 7969, 503, 1363, 7969, 1, 848, 1580, 11, 13494, 7323] expected_words = ["whatever", ' "whatever"', " said", " someone,", " clever!?"] expected_tokens = [[1363, 7969], [503, 1363, 7969, 1], [848], [1580, 11], [13494, 7323]] expected_indices = [[0, 1], [2, 3, 4, 5], [6], [7, 8], [9, 10]] output = _combine_tokens_into_words(tokenizer, encoded_input) self.assertEqual(expected_words, output[0]) self.assertEqual(expected_tokens, output[1]) self.assertEqual(expected_indices, output[2]) output_rust = _combine_tokens_into_words(rust_tokenizer, encoded_input) self.assertEqual(expected_words, output_rust[0]) self.assertEqual(expected_tokens, output_rust[1]) self.assertEqual(expected_indices, output_rust[2]) class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase): checkpoint_name = "openai/whisper-small.en" @classmethod def setUpClass(cls): cls.tokenizer: WhisperTokenizer = WhisperTokenizer.from_pretrained(cls.checkpoint_name) return cls def test_tokenizer_equivalence(self): text = "다람쥐 헌 쳇바퀴에 타고파" multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="korean") monolingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny.en") monolingual_tokens = monolingual_tokenizer.encode(text, add_special_tokens=False) multilingual_tokens = multilingual_tokenizer.encode(text, add_special_tokens=False) assert monolingual_tokenizer.decode(monolingual_tokens) == text assert multilingual_tokenizer.decode(multilingual_tokens) == text assert len(monolingual_tokens) > len(multilingual_tokens) # fmt: off EXPECTED_ENG = [ 46695, 97, 167, 252, 234, 168, 98, 238, 220, 169, 245, 234, 23821, 111, 229, 167, 108, 242, 169, 222, 112, 168, 245, 238, 220, 169, 225, 222, 166, 111, 254, 169, 234, 234 ] EXPECTED_MULTI = [ 9835, 22855, 168, 98, 238, 13431, 234, 43517, 229, 47053, 169, 222, 19086, 19840, 1313, 17974 ] # fmt: on self.assertListEqual(monolingual_tokens, EXPECTED_ENG) self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) def test_tokenizer_special(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="english", task="transcribe" ) text = "Hey! How are you feeling? J'ai l'impression que 郷さん est prêt" multilingual_tokens = multilingual_tokenizer.encode(text) # fmt: off # format: <|startoftranscript|> <|lang-id|> <|task|> <|notimestamps|> ... transcription ids ... <|endoftext|> EXPECTED_MULTI = [ START_OF_TRANSCRIPT, EN_CODE, TRANSCRIBE, NOTIMESTAMPS, 7057, 0, 1012, 366, 291, 2633, 30, 508, 6, 1301, 287, 6, 36107, 631, 220, 11178, 115, 15567, 871, 44393, END_OF_TRANSCRIPT ] EXPECTED_SPECIAL_TEXT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hey! How are you feeling? 
" "J'ai l'impression que 郷さん est prêt<|endoftext|>" ) # fmt: on self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) special_transcript = multilingual_tokenizer.decode(multilingual_tokens, skip_special_tokens=False) self.assertEqual(special_transcript, EXPECTED_SPECIAL_TEXT) transcript = multilingual_tokenizer.decode(multilingual_tokens, skip_special_tokens=True) self.assertEqual(transcript, text) def test_vocab_size(self): self.assertEqual(self.tokenizer.vocab_size, 50257) # Copied from transformers.tests.speech_to_test.test_tokenization_speech_to_text.py def test_tokenizer_decode_ignores_language_codes(self): self.assertIn(ES_CODE, self.tokenizer.all_special_ids) generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_spanish) self.assertNotIn(self.tokenizer.eos_token, result) def test_batch_encoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="spanish", task="translate" ) batch = ["El gato ", "El gato se sentó"] batch_output = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids # fmt: off EXPECTED_MULTI = [ [START_OF_TRANSCRIPT, ES_CODE, TRANSLATE, NOTIMESTAMPS, 17356, 290, 2513, 220, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT], [START_OF_TRANSCRIPT, ES_CODE, TRANSLATE, NOTIMESTAMPS, 17356, 290, 2513, 369, 2279, 812, END_OF_TRANSCRIPT] ] # fmt: on self.assertListEqual(batch_output, EXPECTED_MULTI) def test_set_prefix_tokens(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained( "openai/whisper-tiny", language="spanish", task="translate" ) # change the language prefix token from Spanish to English multilingual_tokenizer.set_prefix_tokens(language="english") batch = ["the cat", "the cat sat"] batch_output = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids # fmt: off EXPECTED_MULTI = [ [START_OF_TRANSCRIPT, EN_CODE, TRANSLATE, NOTIMESTAMPS, 3322, 3857, END_OF_TRANSCRIPT, END_OF_TRANSCRIPT], [START_OF_TRANSCRIPT, EN_CODE, TRANSLATE, NOTIMESTAMPS, 3322, 3857, 3227, END_OF_TRANSCRIPT] ] # fmt: on self.assertListEqual(batch_output, EXPECTED_MULTI) def test_batch_encoding_decoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish") batch = ["hola güey", "que onda"] batch_encoding = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids transcription = multilingual_tokenizer.batch_decode(batch_encoding, skip_special_tokens=True) self.assertListEqual(batch, transcription) def test_offset_decoding(self): multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") # fmt: off INPUT_TOKENS = [ 50258, 50259, 50359, 50364, 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724, 50724, 366, 382, 4048, 382, 257, 361, 18459, 13065, 13, 2221, 13, 7145, 74, 325, 38756, 311, 29822, 7563, 412, 472, 709, 294, 264, 51122, 51122, 912, 636, 300, 2221, 13, 2741, 5767, 1143, 281, 7319, 702, 7798, 13, 400, 2221, 13, 2619, 4004, 811, 2709, 702, 51449, 51449, 50257 ] # fmt: on output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual( output, [ { "text": ( " Lennils, pictures are a sort of upguards and atom paintings, and Mason's exquisite idles" ), "timestamp": (0.0, 7.2), }, { "text": ( " are as 
national as a jingo poem. Mr. Birkut Foster's landscapes smile at one much in the" ), "timestamp": (7.2, 15.16), }, { "text": " same way that Mr. Carker used to flash his teeth. And Mr. John Colier gives his", "timestamp": (15.16, 21.7), }, ], ) # test `decode_with_offsets` output = multilingual_tokenizer.decode(INPUT_TOKENS, decode_with_timestamps=True) self.assertEqual( output, "<|startoftranscript|><|en|><|transcribe|><|0.00|> Lennils, pictures are a sort of upguards and atom" " paintings, and Mason's exquisite idles<|7.20|><|7.20|> are as national as a jingo poem. Mr. Birkut" " Foster's landscapes smile at one much in the<|15.16|><|15.16|> same way that Mr. Carker used to flash" " his teeth. And Mr. John Colier gives his<|21.70|><|21.70|><|endoftext|>", ) # test a single sequence with timestamps # fmt: off INPUT_TOKENS = [ 50364, 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724 ] # fmt: on output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual( output[0], { "text": " Lennils, pictures are a sort of upguards and atom paintings, and Mason's exquisite idles", "timestamp": (0.0, 7.2), }, ) # test a sequence without a single timestamps # fmt: off INPUT_TOKENS = [ 441, 1857, 4174, 11, 5242, 366, 257, 1333, 295, 493, 2794, 2287, 293, 12018, 14880, 11, 293, 25730, 311, 454, 34152, 4496, 904, 50724 ] # fmt: on output = multilingual_tokenizer.decode(INPUT_TOKENS, output_offsets=True)["offsets"] self.assertEqual(output, [])
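The offset-decoding assertions above rely on Whisper's timestamp tokens being spaced at a fixed time precision. As a minimal sketch (assuming the multilingual whisper-tiny tokenizer, where <|0.00|> is token id 50364 and each consecutive timestamp token advances by 0.02 seconds), the mapping from a timestamp token id back to seconds can be reproduced as follows; the helper name is illustrative and is not part of the WhisperTokenizer API.

# Hypothetical helper, not part of WhisperTokenizer: converts a timestamp token id
# to seconds under the assumptions stated above.
TIMESTAMP_BEGIN = 50364  # assumed id of <|0.00|> for the multilingual tokenizer
TIME_PRECISION = 0.02    # assumed seconds per timestamp-token step

def timestamp_token_to_seconds(token_id: int) -> float:
    if token_id < TIMESTAMP_BEGIN:
        raise ValueError(f"{token_id} is not a timestamp token")
    return round((token_id - TIMESTAMP_BEGIN) * TIME_PRECISION, 2)

# The (50364, 50724) pair bracketing the first segment in the test maps to (0.0, 7.2):
assert timestamp_token_to_seconds(50364) == 0.0
assert timestamp_token_to_seconds(50724) == 7.2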
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_feature_extraction_whisper.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class WhisperFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize self.feature_size = feature_size self.chunk_length = chunk_length self.hop_length = hop_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None def setUp(self): self.feat_extract_tester = WhisperFeatureExtractionTester(self) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = 
feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. 
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test truncation required speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 80, 3000)) self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4)) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) audio = self._load_datasamples(1)[0] audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0] self.assertTrue(np.all(np.mean(audio) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
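The (1, 80, 3000) shape asserted in test_integration, and the feature_extractor.n_samples / feature_extractor.nb_max_frames attributes used in test_call, follow directly from the feature extractor's windowing parameters. A minimal sketch of that arithmetic, assuming the default WhisperFeatureExtractor settings (sampling_rate=16000, chunk_length=30, hop_length=160, feature_size=80):

sampling_rate = 16_000  # Hz
chunk_length = 30       # seconds per padded/truncated chunk
hop_length = 160        # samples between successive log-mel frames
feature_size = 80       # number of mel bins

n_samples = chunk_length * sampling_rate   # 480_000 samples per chunk
nb_max_frames = n_samples // hop_length    # 3_000 log-mel frames

# Matches the (batch, 80, 3000) input_features shape checked above.
assert (feature_size, nb_max_frames) == (80, 3_000)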
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_modeling_flax_whisper.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import inspect import tempfile import unittest import transformers from transformers import WhisperConfig, is_flax_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_datasets_available(): import datasets from datasets import load_dataset if is_flax_available(): import jax import numpy as np from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import ( FLAX_MODEL_MAPPING, FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, WhisperFeatureExtractor, WhisperProcessor, ) from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model @require_flax class FlaxWhisperModelTester: config_cls = WhisperConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=99, d_model=16, decoder_attention_heads=4, decoder_ffn_dim=16, decoder_layers=2, encoder_attention_heads=4, encoder_ffn_dim=16, encoder_layers=2, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=70, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = encoder_layers self.num_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_seq_length = seq_length // 2 self.decoder_seq_length = 1 self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = 
begin_suppress_tokens def prepare_config_and_inputs_for_common(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = np.array(self.batch_size * [[self.decoder_start_token_id]]) config = WhisperConfig( vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=True, activation_function=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, pad_token_id=self.pad_token_id, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, tie_word_embeddings=True, d_model=self.d_model, decoder_attention_heads=self.decoder_attention_heads, decoder_ffn_dim=self.decoder_ffn_dim, decoder_layers=self.decoder_layers, encoder_attention_heads=self.encoder_attention_heads, encoder_ffn_dim=self.encoder_ffn_dim, encoder_layers=self.encoder_layers, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) inputs_dict = prepare_whisper_inputs_dict(config, input_features, decoder_input_ids) return config, inputs_dict def prepare_whisper_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8), ], axis=-1, ) return { "input_features": input_ids, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } def partialclass(cls, *args, **kwargs): class NewCls(cls): __init__ = functools.partialmethod(cls.__init__, *args, **kwargs) return NewCls def make_partial_class(full_class, *args, **kwargs): partial_class = partialclass(full_class, *args, **kwargs) partial_class.__name__ = full_class.__name__ partial_class.__module__ = full_class.__module__ return partial_class @require_flax class FlaxWhisperModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForConditionalGeneration, FlaxWhisperModel) if is_flax_available() else () all_generative_model_classes = (FlaxWhisperForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = FlaxWhisperModelTester(self) _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() # overwrite because of `input_features` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "decoder_input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) # overwrite because of `input_features` def test_jit_compilation(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, decoder_input_ids, **kwargs): return model(input_features=input_features, decoder_input_ids=decoder_input_ids, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) # overwrite because of `input_features` @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` @is_pt_flax_cross_test def 
test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @slow @require_flax class FlaxWhisperModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_tiny_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = 
WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, ) # fmt: off EXPECTED_LOGITS = np.array( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_small_en_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-small.en", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([model.config.decoder_start_token_id]), output_hidden_states=False, output_attentions=False, return_dict=False, ) logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T # fmt: off EXPECTED_LOGITS = np.array( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_large_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="np" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, return_dict=False, ) logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T # fmt: off EXPECTED_LOGITS = np.array( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_tiny_en_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0], skip_special_tokens=True) EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation_multilingual(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np") model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate( input_features, do_sample=False, max_length=20, ).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." 
self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="translate") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences # fmt: off EXPECTED_LOGITS = np.array( [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all,", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_en_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences # fmt: off EXPECTED_LOGITS = np.array( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="jax").input_features generate_fn = jax.jit(functools.partial(model.generate, max_length=448, return_timestamps=True)) generated_ids = generate_fn(input_features) # fmt: off EXPECTED_OUTPUT = np.array([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." 
), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) class FlaxWhisperEncoderModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, num_mel_bins=80, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, classifier_proj_size=4, num_labels=2, is_encoder_decoder=False, is_decoder=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens self.classifier_proj_size = classifier_proj_size self.num_labels = num_labels self.is_encoder_decoder = is_encoder_decoder self.is_decoder = is_decoder def get_config(self): return WhisperConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, classifier_proj_size=self.classifier_proj_size, num_labels=self.num_labels, is_encoder_decoder=self.is_encoder_decoder, is_decoder=self.is_decoder, ) def prepare_whisper_encoder_inputs_dict( self, input_features, ): return { "input_features": input_features, } def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length]) config = self.get_config() inputs_dict = self.prepare_whisper_encoder_inputs_dict( input_features=input_features, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) @require_flax class WhisperEncoderModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForAudioClassification,) if is_flax_available() else () is_encoder_decoder = False fx_compatible = False test_pruning = False 
test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = FlaxWhisperEncoderModelTester(self) _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() # overwrite because of `input_features` def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, **kwargs): return model(input_features=input_features, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) # overwrite because of `input_features` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "attention_mask", "output_attentions"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_inputs_embeds(self): pass # WhisperEncoder has no inputs_embeds and thus the `get_input_embeddings` fn is not implemented def test_model_common_attributes(self): pass # WhisperEncoder cannot resize token embeddings since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # WhisperEncoder does not have any base model def test_save_load_to_base(self): pass # WhisperEncoder does not have any base model def test_save_load_from_base(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_from_base_pt(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_to_base_pt(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): pass
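For reference, the partialclass/make_partial_class helpers used throughout these Flax tests simply pre-bind constructor keyword arguments (here input_shape) onto a subclass, so the shared test mixin can instantiate model classes without knowing the Whisper-specific input shape. A self-contained sketch of the pattern, with purely illustrative class names:

import functools

def partialclass(cls, *args, **kwargs):
    # Subclass whose __init__ has the given arguments already bound.
    class NewCls(cls):
        __init__ = functools.partialmethod(cls.__init__, *args, **kwargs)
    return NewCls

class DummyModel:  # illustrative stand-in for a Flax model class
    def __init__(self, config, input_shape=(1, 1)):
        self.config = config
        self.input_shape = input_shape

BoundModel = partialclass(DummyModel, input_shape=(1, 80, 60))
model = BoundModel(config={"d_model": 16})
assert model.input_shape == (1, 80, 60)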
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/whisper/test_modeling_whisper.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Whisper model. """ import copy import inspect import os import tempfile import unittest import numpy as np import transformers from transformers import WhisperConfig from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_flax_available, is_torch_available from transformers.utils.import_utils import is_datasets_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_torch_available(): import torch from transformers import ( WhisperFeatureExtractor, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperProcessor, set_seed, ) from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { # "input_ids": input_features, "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class WhisperModelTester: def __init__( self, parent, batch_size=2, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = 
use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperModel(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = WhisperModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and 
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = WhisperModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (WhisperModel, WhisperForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (WhisperForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "audio-classification": WhisperForAudioClassification, "automatic-speech-recognition": WhisperForConditionalGeneration, "feature-extraction": WhisperModel, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False # Needs higher percentages after model tester's vocab_size is changed to 200 (PR #21222) # `0.5` is for `test_disk_offload` (which also works for `test_model_parallelism`) model_split_percents = [0.5, 0.8, 0.9] input_name = "input_features" # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name in [ "AutomaticSpeechRecognitionPipelineTests", "AudioClassificationPipelineTests", ]: # RuntimeError: The size of tensor a (1500) must match the size of tensor b (30) at non-singleton # dimension 1 return True return False def setUp(self): self.model_tester = WhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_model_forward_with_frozen_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs, freeze_encoder=True) def test_requires_grad_with_frozen_encoder(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.freeze_encoder() try: encoder_grads = [param.requires_grad for param in model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.decoder.parameters()] except AttributeError: encoder_grads = [param.requires_grad for param in model.model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.model.decoder.parameters()] self.assertFalse(all(encoder_grads)) self.assertTrue(all(decoder_grads)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def _get_input_ids_and_config(self, batch_size=3): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size=batch_size input_ids = input_ids[:batch_size, :, :] # generate max 3 tokens max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) decoder_input_ids = inputs.pop("decoder_input_ids", None) inputs.pop("decoder_attention_mask", None) wte = model.get_input_embeddings() inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass def test_generate_with_head_masking(self): pass def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": input_features = input_features.half() model.half() model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_generate_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).to(torch_device) # Hack 
to keep the test fast and not require downloading a model with a generation_config model.generation_config.__setattr__("lang_to_id", {"<|en|>": 1}) model.generation_config.__setattr__("task_to_id", {"transcribe": 2}) # test language code model.generate(input_features, language="en") # test tokenizer code model.generate(input_features, language="<|en|>") # test language name model.generate(input_features, language="English") def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) 
model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it 
actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) 
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + torch.tensor( [model._get_decoder_start_token_id()], device=input_ids.device ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward input_features = inputs["input_features"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] # prepare `attention_mask` with shape (batch_size, sequence_length) attention_mask = torch.ones( input_features.shape[0], input_features.shape[-1], device=input_features.device, dtype=input_features.dtype, ) traced_model = torch.jit.trace( model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in 
non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_mask_feature_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_feature_prob = 0.2 config.mask_feature_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) def test_mask_time_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_time_prob = 0.2 config.mask_time_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) def test_generate_with_prompt_ids_and_task_and_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = WhisperForConditionalGeneration(config).eval().to(torch_device) input_features = input_dict["input_features"] prompt_ids = np.arange(5) language = "<|de|>" task = "translate" lang_id = 6 task_id = 7 model.generation_config.__setattr__("lang_to_id", {language: lang_id}) model.generation_config.__setattr__("task_to_id", {task: task_id}) output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, lang_id, task_id, ] for row in output.tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def 
test_generate_with_prompt_ids_and_forced_decoder_ids(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = WhisperForConditionalGeneration(config).eval().to(torch_device) input_features = input_dict["input_features"] prompt_ids = np.asarray(range(5)) forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] output = model.generate( input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids ) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, *[token for _rank, token in forced_decoder_ids], ] for row in output.tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) @require_torch @require_torchaudio class WhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_tiny_logits_librispeech(self): torch_device = "cpu" set_seed(0) model = WhisperModel.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = torch.tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ model.decoder.embed_tokens.weight.T self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-small.en") model.to(torch_device) input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="pt").input_features.to(torch_device) logits = model( input_features, decoder_input_ids=torch.tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on 
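# The decoder hidden states were projected through the decoder token-embedding matrix
# above (`logits.last_hidden_state @ model.decoder.embed_tokens.weight.T`). With tied
# word embeddings this mirrors the output projection (LM head) of
# WhisperForConditionalGeneration, so the assertion below effectively compares the
# first 30 vocabulary logits against the reference values.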
self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="pt" ) input_features = processed_inputs.input_features.to(torch_device) decoder_input_ids = processed_inputs.labels.to(torch_device) logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_tiny_en_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. 
Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation_multilingual(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features generated_ids = model.generate(input_features, max_length=20, task="translate") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50258, 50259, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404], [50258, 50259, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257], [50258, 50259, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904], [50258, 50259, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=20).to("cpu") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=448, return_timestamps=True).to("cpu") # fmt: off EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. 
He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." ), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_token_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generate_outputs = model.generate( input_features, max_length=448, return_timestamps=True, return_token_timestamps=True ) self.assertEqual(generate_outputs.sequences.shape, generate_outputs.token_timestamps.shape) # fmt: off EXPECTED_OUTPUT = torch.tensor([ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4800, 0.8200, 0.9600, 1.1200, 1.1200, 1.2200, 1.5000, 1.7200, 2.0000, 2.3400, 2.5000, 2.6600, 3.1800, 3.5600, 3.6800, 3.8000, 4.1000, 4.3000, 4.5800, 4.9400, 5.3800, 12.4200, 12.8400, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9200, 26.9400, 26.9400, 26.9400, 26.9400, 29.8400 ], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.5200, 0.9000, 1.1400, 1.4200, 1.5200, 1.6800, 1.6800, 1.8800, 2.1000, 2.2200, 2.6200, 3.1400, 3.5800, 3.9600, 4.4000, 17.3000, 17.3000, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7200, 26.7400, 26.7400, 26.7400, 26.7400, 26.7400, 26.7400, 28.0000 ], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.7600, 1.0000, 1.4200, 1.8000, 1.9400, 2.1800, 2.5200, 3.0200, 3.3200, 3.5400, 3.9400, 4.5600, 4.9200, 5.2800, 5.5600, 5.9000, 6.1600, 6.3000, 6.4800, 6.4800, 6.6400, 7.8200, 7.9600, 8.2200, 8.6000, 8.9200, 9.2200, 9.5200, 9.7200, 10.0600, 10.5400, 10.8800, 11.2600, 11.5400, 11.7400, 12.0800, 15.6800, 15.6800], [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.7400, 1.0400, 1.3200, 1.6800, 2.1400, 2.4800, 2.7800, 3.0800, 3.1600, 3.4000, 3.6000, 4.0200, 4.2200, 4.8600, 5.2400, 5.7400, 6.3400, 6.6200, 6.7600, 6.7600, 6.8600, 7.2400, 7.4200, 7.6800, 7.9200, 8.4800, 8.7600, 9.2000, 9.2000, 9.4200, 15.8200, 15.8200, 29.6400, 29.6600, 29.6600, 29.6600, 29.6600, 29.7600] ]) # fmt: on self.assertTrue(torch.allclose(generate_outputs.token_timestamps.to("cpu"), EXPECTED_OUTPUT)) @slow def test_tiny_specaugment_librispeech(self): torch_device = "cpu" set_seed(0) # Apply SpecAugment model = WhisperModel.from_pretrained("openai/whisper-tiny", apply_spec_augment=True) # Set model to training mode to enable SpecAugment model.train() model.to(torch_device) input_speech = 
self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.9362, -4.7105, 5.0879, 3.9642, 1.0013, -6.0096, 4.7285, -3.1847, -0.8648, 1.9631, 6.2653, 3.6936, 0.3575, -4.5818, 3.0564, 7.8712, 2.9951, 0.6848, 9.9497, -2.6638, 1.1571, -6.8546, -1.4333, -7.7584, 1.1200, 3.9030, 4.4655, -4.4919, -1.1703, 9.6241 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_generate_with_prompt_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(4)[-1:] input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) output_without_prompt = model.generate(input_features) prompt_ids = processor.get_prompt_ids("Leighton") output_with_prompt = model.generate(input_features, prompt_ids=prompt_ids) expected_without_prompt = "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" expected_with_prompt = "<|startofprev|> Leighton<|startoftranscript|><|en|><|transcribe|><|notimestamps|> He has grave doubts whether Sir Frederick Leighton's work is really Greek after all and can discover in it but little of Rocky Ithaca.<|endoftext|>" self.assertEqual(processor.decode(output_without_prompt[0]), expected_without_prompt) self.assertEqual(processor.decode(output_with_prompt[0]), expected_with_prompt) @slow def test_generate_with_prompt_ids_and_forced_decoder_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) task = "translate" language = "de" expected_tokens = [f"<|{task}|>", f"<|{language}|>"] prompt = "test prompt" prompt_ids = processor.get_prompt_ids(prompt) output = model.generate(input_features, task=task, language=language, prompt_ids=prompt_ids) text = processor.decode(output[0]) self.assertTrue(prompt in text) self.assertTrue(all(token in text for token in expected_tokens)) @slow def test_generate_with_prompt_ids_and_no_non_prompt_forced_decoder_ids(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="pt").input_features.to(torch_device) prompt = "test prompt" prompt_ids = processor.get_prompt_ids(prompt) model.generation_config.forced_decoder_ids = None model.config.forced_decoder_ids = None output = model.generate(input_features, prompt_ids=prompt_ids, return_timestamps=True) text = processor.decode(output[0]) self.assertTrue(prompt in text) def prepare_whisper_encoder_inputs_dict(config, input_features, head_mask=None): if head_mask is None: head_mask = 
torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) return {"input_features": input_features, "head_mask": head_mask} @require_torch class WhisperEncoderModelTester: def __init__( self, parent, batch_size=2, seq_length=60, is_training=True, use_labels=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, num_mel_bins=80, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, classifier_proj_size=4, num_labels=2, is_encoder_decoder=False, is_decoder=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens self.classifier_proj_size = classifier_proj_size self.num_labels = num_labels self.is_encoder_decoder = is_encoder_decoder self.is_decoder = is_decoder def get_config(self): return WhisperConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, classifier_proj_size=self.classifier_proj_size, num_labels=self.num_labels, is_encoder_decoder=self.is_encoder_decoder, is_decoder=self.is_decoder, ) def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length]) config = self.get_config() inputs_dict = prepare_whisper_encoder_inputs_dict( config, input_features=input_features, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperForAudioClassification(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] # first forward pass last_hidden_state = model(input_features).logits self.parent.assertTrue(last_hidden_state.shape, (13, 2)) @require_torch class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = 
(WhisperForAudioClassification,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = WhisperEncoderModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "head_mask", "encoder_outputs"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_disk_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_model_parallelism(self): pass # input embeds is meaningless for an encoder-only acoustic model def test_inputs_embeds(self): pass # the equivalent test is passing the encoder outputs directly to the model def test_encoder_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): outputs = model(**inputs)[0] input_ids = inputs["input_features"] del inputs["input_features"] encoder = model.encoder with torch.no_grad(): inputs["encoder_outputs"] = encoder(input_ids) outputs_embeds = model(**inputs)[0] self.assertTrue((outputs_embeds == outputs).all()) # Needs to override as the encoder input embedding is a Conv1d def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Conv1d)) model.set_input_embeddings(torch.nn.Conv1d(10, 10, 3)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, torch.nn.Conv1d)) # WhisperEncoder cannot resize token embeddings since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class)
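# A minimal, self-contained sketch of the sequence-length subsampling that the shape
# assertions in the tests above rely on. It mirrors the testers' helper
# `get_subsampled_output_lengths` (one stride-2 convolution per conv layer, as defined
# in the encoder tester above); the function name below is hypothetical and is used
# only for illustration.
def _example_subsampled_length(input_length: int, num_conv_layers: int = 1) -> int:
    for _ in range(num_conv_layers):
        input_length = (input_length - 1) // 2 + 1
    return input_length


# For instance, the encoder tester's 60-frame mel inputs map to 30 encoder positions:
# _example_subsampled_length(60, num_conv_layers=1) == 30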
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/sam/test_modeling_sam.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SAM model. """ import gc import inspect import unittest import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SamModel, SamProcessor from transformers.models.sam.modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class SamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class SamModelTester: def __init__( self, parent, 
hidden_size=36, intermediate_size=72, projection_dim=62, output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = SamPromptEncoderTester() self.mask_decoder_tester = SamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def 
create_and_check_get_image_hidden_states(self, config, pixel_values): model = SamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) with torch.no_grad(): result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SamModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": SamModel, "mask-generation": SamModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = SamModelTester(self) self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False) self.prompt_encoder_config_tester = ConfigTester( self, config_class=SamPromptEncoderConfig, has_text_modality=False, num_attention_heads=12, num_hidden_layers=2, ) self.mask_decoder_config_tester = ConfigTester( self, config_class=SamMaskDecoderConfig, has_text_modality=False ) def test_config(self): self.vision_config_tester.run_common_tests() self.prompt_encoder_config_tester.run_common_tests() self.mask_decoder_config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def 
test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="SamModel does not support training") def test_training(self): pass @unittest.skip(reason="SamModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SamModel does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # Use a slightly higher default tol to make the tests non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) @slow def test_model_from_pretrained(self): for model_name in SAM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SamModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @slow class SamModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def test_inference_mask_generation_no_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.4515), atol=2e-4)) self.assertTrue(torch.allclose(masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), atol=2e-4)) def test_inference_mask_generation_one_point_one_bb(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = [[[820, 1080]]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9566), atol=2e-4)) self.assertTrue( torch.allclose(masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), atol=2e-4) ) def test_inference_mask_generation_batched_points_batched_images(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze().cpu() masks = outputs.pred_masks[0, 0, 0, 0, :3].cpu() EXPECTED_SCORES = torch.tensor( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625]) self.assertTrue(torch.allclose(scores, EXPECTED_SCORES, atol=1e-3)) self.assertTrue(torch.allclose(masks, EXPECTED_MASKS, atol=1e-3)) def test_inference_mask_generation_one_point_one_bb_zero(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7894), 
atol=1e-4)) def test_inference_mask_generation_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4)) def test_inference_mask_generation_two_points(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4)) def test_inference_mask_generation_two_points_batched(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.9762), atol=1e-4)) self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.9637), atol=1e-4)) def test_inference_mask_generation_one_box(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7937), atol=1e-4)) def test_inference_mask_generation_batched_image_one_point(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = model(**inputs) 
scores_batched = outputs.iou_scores.squeeze() input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) scores_single = outputs.iou_scores.squeeze() self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4)) def test_inference_mask_generation_two_points_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() # fmt: off input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: on input_points = input_points.unsqueeze(0) inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 2, 3)) torch.testing.assert_allclose( iou_scores, torch.tensor([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4 ) def test_inference_mask_generation_three_boxes_point_batch(self): model = SamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") model.to(torch_device) model.eval() raw_image = prepare_image() # fmt: off input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu() EXPECTED_IOU = torch.tensor([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = input_boxes.unsqueeze(0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) iou_scores = outputs.iou_scores.cpu() self.assertTrue(iou_scores.shape == (1, 3, 3)) torch.testing.assert_allclose(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4) def test_dummy_pipeline_generation(self): generator = pipeline( "mask-generation", model="facebook/sam-vit-base", device=0 if torch.cuda.is_available() else -1 ) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
0
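The PyTorch integration tests above all follow the same prompt-then-forward pattern: SamProcessor turns an image plus optional input_points / input_labels / input_boxes into model inputs, and SamModel returns iou_scores (one confidence per predicted mask) together with low-resolution pred_masks logits. Below is a minimal standalone sketch of that pattern, kept deliberately close to test_inference_mask_generation_one_point; the checkpoint name and image URL are the ones the tests themselves use, and the sketch assumes they are reachable from the current environment.

# Minimal sketch of the point-prompt inference pattern exercised by the slow
# integration tests above; checkpoint and image URL are taken from those tests.
import requests
import torch
from PIL import Image

from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-base").to(device).eval()
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

# One prompt point (x, y) with a positive label, as in
# test_inference_mask_generation_one_point.
input_points = [[[400, 650]]]
input_labels = [[1]]
inputs = processor(
    images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
).to(device)

with torch.no_grad():
    outputs = model(**inputs)

# Three candidate masks are predicted per prompt by default: iou_scores holds one
# confidence per mask, pred_masks the matching low-resolution mask logits.
print(outputs.iou_scores.shape)      # (batch_size, point_batch_size, 3)
print(outputs.pred_masks.shape[:3])  # (batch_size, point_batch_size, 3)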
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/sam/test_modeling_tf_sam.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow SAM model. """ from __future__ import annotations import inspect import unittest import numpy as np import requests from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import SamProcessor, TFSamModel if is_vision_available(): from PIL import Image class TFSamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=24, patch_size=2, mask_input_channels=4, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return SamPromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class TFSamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, layer_norm_eps=1e-6, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps def get_config(self): return SamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, layer_norm_eps=self.layer_norm_eps, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class TFSamModelTester: def __init__( self, parent, hidden_size=36, intermediate_size=72, projection_dim=62, 
output_channels=32, num_hidden_layers=2, num_attention_heads=4, num_channels=3, image_size=24, patch_size=2, hidden_act="gelu", layer_norm_eps=1e-06, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, rel_pos_zero_init=False, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=16, mlp_dim=None, batch_size=2, ): self.parent = parent self.image_size = image_size self.patch_size = patch_size self.output_channels = output_channels self.num_channels = num_channels self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.rel_pos_zero_init = rel_pos_zero_init self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = mlp_dim self.batch_size = batch_size # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.prompt_encoder_tester = TFSamPromptEncoderTester() self.mask_decoder_tester = TFSamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = SamVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, initializer_factor=self.initializer_factor, output_channels=self.output_channels, qkv_bias=self.qkv_bias, mlp_ratio=self.mlp_ratio, use_abs_pos=self.use_abs_pos, use_rel_pos=self.use_rel_pos, rel_pos_zero_init=self.rel_pos_zero_init, window_size=self.window_size, global_attn_indexes=self.global_attn_indexes, num_pos_feats=self.num_pos_feats, mlp_dim=self.mlp_dim, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return SamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, ) def create_and_check_model(self, config, pixel_values): model = TFSamModel(config=config) result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def create_and_check_get_image_features(self, config, pixel_values): model = TFSamModel(config=config) result = model.get_image_embeddings(pixel_values) self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12)) def create_and_check_get_image_hidden_states(self, config, pixel_values): model = TFSamModel(config=config) result = model.vision_encoder( pixel_values, output_hidden_states=True, 
return_dict=True, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) result = model.vision_encoder( pixel_values, output_hidden_states=True, return_dict=False, ) # after computing the convolutional features expected_hidden_states_shape = (self.batch_size, 12, 12, 36) self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1) self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFSamModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFSamModel,) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFSamModel, "mask-generation": TFSamModel} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def setUp(self): self.model_tester = TFSamModelTester(self) self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False) self.prompt_encoder_config_tester = ConfigTester( self, config_class=SamPromptEncoderConfig, has_text_modality=False, num_attention_heads=12, num_hidden_layers=2, ) self.mask_decoder_config_tester = ConfigTester( self, config_class=SamMaskDecoderConfig, has_text_modality=False ) def test_config(self): self.vision_config_tester.run_common_tests() self.prompt_encoder_config_tester.run_common_tests() self.mask_decoder_config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) def test_image_hidden_states(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True expected_vision_attention_shape = ( self.model_tester.batch_size * self.model_tester.num_attention_heads, 196, 196, ) expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) mask_decoder_attentions = outputs.mask_decoder_attentions self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers) self.assertListEqual( list(vision_attentions[0].shape[-4:]), list(expected_vision_attention_shape), ) self.assertListEqual( list(mask_decoder_attentions[0].shape[-4:]), list(expected_mask_decoder_attention_shape), ) @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests") def test_hidden_states_output(self): pass @slow def test_model_from_pretrained(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") # sam-vit-huge blows out our memory self.assertIsNotNone(model) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-4, name="outputs", attributes=None): super().check_pt_tf_outputs( tf_outputs=tf_outputs, pt_outputs=pt_outputs, model_class=model_class, tol=tol, name=name, attributes=attributes, ) def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @require_tf @slow class TFSamModelIntegrationTest(unittest.TestCase): def test_inference_mask_generation_no_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() inputs = processor(images=raw_image, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.4515), atol=2e-4)) self.assertTrue(np.allclose(masks.numpy(), np.array([-4.1807, -3.4949, -3.4483]), atol=1e-2)) def test_inference_mask_generation_one_point_one_bb(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_boxes = [[[650, 900, 1000, 1250]]] input_points = 
[[[820, 1080]]] inputs = processor(images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] self.assertTrue(np.allclose(scores[-1], np.array(0.9566), atol=2e-4)) self.assertTrue(np.allclose(masks.numpy(), np.array([-12.7657, -12.3683, -12.5985]), atol=2e-2)) def test_inference_mask_generation_batched_points_batched_images(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [ [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]], ] inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) masks = outputs.pred_masks[0, 0, 0, 0, :3] EXPECTED_SCORES = np.array( [ [ [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], [ [0.3317, 0.7264, 0.7646], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], [0.6765, 0.9379, 0.8803], ], ] ) EXPECTED_MASKS = np.array([-2.8552, -2.7990, -2.9612]) self.assertTrue(np.allclose(scores.numpy(), EXPECTED_SCORES, atol=1e-3)) self.assertTrue(np.allclose(masks.numpy(), EXPECTED_MASKS, atol=3e-2)) def test_inference_mask_generation_one_point_one_bb_zero(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_boxes = [[[620, 900, 1000, 1255]]] input_points = [[[820, 1080]]] labels = [[0]] inputs = processor( images=raw_image, input_boxes=input_boxes, input_points=input_points, input_labels=labels, return_tensors="tf", ) outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7894), atol=1e-4)) def test_inference_mask_generation_one_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650]]] input_labels = [[1]] inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1], np.array(0.9675), atol=1e-4)) # With no label input_points = [[[400, 650]]] inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9675), atol=1e-4)) def test_inference_mask_generation_two_points(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4)) # no labels inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), 
atol=1e-4)) def test_inference_mask_generation_two_points_batched(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_points = [[[400, 650], [800, 650]], [[400, 650]]] input_labels = [[1, 1], [1]] inputs = processor( images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="tf" ) outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[0][-1].numpy(), np.array(0.9762), atol=1e-4)) self.assertTrue(np.allclose(scores[1][-1], np.array(0.9637), atol=1e-4)) def test_inference_mask_generation_one_box(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() input_boxes = [[[75, 275, 1725, 850]]] inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="tf") outputs = model(**inputs) scores = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7937), atol=1e-4)) def test_inference_mask_generation_batched_image_one_point(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() raw_dog_image = prepare_dog_img() input_points = [[[820, 1080]], [[220, 470]]] inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores_batched = tf.squeeze(outputs.iou_scores) input_points = [[[220, 470]]] inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) scores_single = tf.squeeze(outputs.iou_scores) self.assertTrue(np.allclose(scores_batched[1, :].numpy(), scores_single.numpy(), atol=1e-4)) def test_inference_mask_generation_two_points_point_batch(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() # fmt: off input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]]) # fmt: on input_points = tf.expand_dims(input_points, 0) inputs = processor(raw_image, input_points=input_points, return_tensors="tf") outputs = model(**inputs) iou_scores = outputs.iou_scores self.assertTrue(iou_scores.shape == (1, 2, 3)) self.assertTrue( np.allclose( iou_scores.numpy(), np.array([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4, ) ) def test_inference_mask_generation_three_boxes_point_batch(self): model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") raw_image = prepare_image() # fmt: off input_boxes = tf.convert_to_tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]) EXPECTED_IOU = np.array([[[0.9773, 0.9881, 0.9522], [0.5996, 0.7661, 0.7937], [0.5996, 0.7661, 0.7937]]]) # fmt: on input_boxes = tf.expand_dims(input_boxes, 0) inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="tf") outputs = model(**inputs) iou_scores = outputs.iou_scores self.assertTrue(iou_scores.shape == (1, 3, 3)) self.assertTrue(np.allclose(iou_scores.numpy(), EXPECTED_IOU, atol=1e-4, rtol=1e-4))
0
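Both the PyTorch and TensorFlow test classes assert the same constants in test_attention_outputs: vision attentions of shape (batch_size * num_attention_heads, 196, 196) and mask-decoder attentions of shape (batch_size, 1, 144, 32). The snippet below recomputes those numbers from the tiny tester configuration; reading 196 as the padded 14x14 attention window and 144 as the 12x12 patch grid is an interpretation of where the constants come from, not something the tests state.

# Recompute the attention shapes asserted in test_attention_outputs from the
# tester hyperparameters. Treating 196 as window_size**2 (the 12x12 patch grid
# padded up to a 14x14 window) is an assumption; the asserted values themselves
# come straight from the tests.
batch_size = 2                 # SamModelTester / TFSamModelTester default
num_attention_heads = 4
image_size, patch_size = 24, 2
window_size = 14
mask_decoder_hidden_size = 32  # SamMaskDecoderTester default

patch_grid = image_size // patch_size      # 12 -> 144 patch tokens in total
window_tokens = window_size * window_size  # 196 tokens attended to per window

expected_vision_attention_shape = (batch_size * num_attention_heads, window_tokens, window_tokens)
expected_mask_decoder_attention_shape = (batch_size, 1, patch_grid * patch_grid, mask_decoder_hidden_size)

assert expected_vision_attention_shape == (8, 196, 196)
assert expected_mask_decoder_attention_shape == (2, 1, 144, 32)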
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/sam/test_processor_sam.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class SamProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, SamImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") input_feat_extract.pop("original_sizes") # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes") # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) @require_torch def test_post_process_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = [torch.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks( dummy_masks, torch.tensor(original_sizes), 
torch.tensor(reshaped_input_size) ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(ValueError): masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)) @require_vision @require_tf class TFSamProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_additional_features(self): processor = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, SamImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") input_feat_extract.pop("original_sizes") # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes") # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) @require_tf def test_post_process_masks(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = [tf.ones((1, 3, 5, 5))] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf") self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) masks = processor.post_process_masks( dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf", ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) # should also work with np dummy_masks = [np.ones((1, 3, 5, 5))] masks = processor.post_process_masks( dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf" ) self.assertEqual(masks[0].shape, (1, 3, 1764, 2646)) dummy_masks = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError): masks = processor.post_process_masks( dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), 
return_tensors="tf" ) @require_vision @require_torchvision class SamProcessorEquivalenceTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SamImageProcessor() processor = SamProcessor(image_processor) processor.save_pretrained(self.tmpdirname) def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. """ image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def test_post_process_masks_equivalence(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32) tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)] pt_dummy_masks = [torch.tensor(dummy_masks)] original_sizes = [[1764, 2646]] reshaped_input_size = [[683, 1024]] tf_masks = processor.post_process_masks( tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf" ) pt_masks = processor.post_process_masks( pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy())) @is_pt_tf_cross_test def test_image_processor_equivalence(self): image_processor = self.get_image_processor() processor = SamProcessor(image_processor=image_processor) image_input = self.prepare_image_inputs() pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy() pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy() tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy() tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy() self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor)) self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract)) self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
0
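The post-processing tests above check one transformation: post_process_masks upsamples low-resolution mask logits back to the size of the original image, using the original_sizes and reshaped_input_sizes bookkeeping produced at preprocessing time. A standalone sketch of that call follows; the sizes are the literal values from the tests, and the processor is built from a default SamImageProcessor so no checkpoint download is needed.

# Sketch of the mask post-processing covered by test_post_process_masks: mask
# logits predicted at low resolution are resized back to the original image size.
import torch

from transformers import SamImageProcessor, SamProcessor

processor = SamProcessor(SamImageProcessor())  # default config, nothing downloaded

dummy_masks = [torch.ones((1, 3, 5, 5))]  # low-resolution mask logits per image
original_sizes = [[1764, 2646]]           # image size before preprocessing
reshaped_input_sizes = [[683, 1024]]      # size after the processor's resize (longest edge 1024 here)

masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_sizes)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646]) -- back at the original resolution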
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rag/test_tokenization_rag.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class RagTokenizerTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) @require_tokenizers def test_save_load_pretrained_with_saved_config(self): save_dir = os.path.join(self.tmpdirname, "rag_tokenizer") rag_config = 
RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict()) rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer()) rag_config.save_pretrained(save_dir) rag_tokenizer.save_pretrained(save_dir) new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config) self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast) self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab()) @slow def test_pretrained_token_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict) @slow def test_pretrained_sequence_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict)
0
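test_save_load_pretrained_with_saved_config pins down the contract of RagTokenizer: it is a thin wrapper around a DPR question-encoder tokenizer and a BART generator tokenizer, and it can be saved next to a RagConfig and reloaded from the same directory. The sketch below replays that round trip with small public checkpoints instead of the hand-written vocab files built in setUp; the two checkpoint names are illustrative assumptions rather than values taken from the tests, and, as in the guarded imports above, torch, datasets and faiss are assumed to be installed.

# Round trip covered by test_save_load_pretrained_with_saved_config, using
# public tokenizer checkpoints instead of the toy vocab files from setUp().
# The two checkpoint names below are illustrative assumptions.
import tempfile

from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer

question_encoder = DPRQuestionEncoderTokenizer.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)
generator = BartTokenizer.from_pretrained("facebook/bart-base")

rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(question_encoder=question_encoder, generator=generator)

with tempfile.TemporaryDirectory() as save_dir:
    rag_config.save_pretrained(save_dir)  # saved first, as in the test, so the tokenizer can be reloaded against it
    rag_tokenizer.save_pretrained(save_dir)
    reloaded = RagTokenizer.from_pretrained(save_dir, config=rag_config)

# As in the slow NQ tests, calling the tokenizer on question strings returns an
# encoding dict (those tests only assert that the call succeeds).
input_dict = reloaded(["who got the first nobel prize in physics"])
print(sorted(input_dict.keys()))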
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rag/test_retrieval_rag.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 
"bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_canonical_hf_index_retriever(self): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) return retriever def get_dummy_custom_hf_index_retriever(self, from_disk: bool): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) return retriever def get_dummy_legacy_index_retriever(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index") dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr") pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb")) passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl") passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(passages, open(passages_file_name, "wb")) config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, ) retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() ) return retriever def test_canonical_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", 
"title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_canonical_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve_from_disk(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( 
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_legacy_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_legacy_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["text", "title"]) self.assertEqual(len(doc_dicts[0]["text"]), n_docs) self.assertEqual(doc_dicts[0]["text"][0], "bar") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0], "foo") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_legacy_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def test_hf_index_retriever_call(self): import torch n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, list) self.assertIsInstance(context_attention_mask, list) self.assertIsInstance(retrieved_doc_embeds, np.ndarray) out = retriever( question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, torch.Tensor) self.assertIsInstance(context_attention_mask, torch.Tensor) self.assertIsInstance(retrieved_doc_embeds, torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def test_custom_hf_index_end2end_retriever_call(self): context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer() n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer) question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) self.assertEqual( len(out), 6 ) # 
check whether the retriever output consists of 6 attributes including the tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True ) # check that the tokenized-doc keys are present in the output dictionary.
0
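Every doc-id assertion in the retrieval tests above follows from the same maximum-inner-product argument: an all-ones query scores highest against the 2 * np.ones(...) embedding (doc "1"), while an all-minus-ones query scores highest against the np.ones(...) embedding (doc "0"). Below is a NumPy-only sketch of that scoring with the same toy sizes as the tests; FAISS is not needed to see the ranking.

# Inner-product ranking behind doc_ids.tolist() == [[1], [0]] in the tests above.
import numpy as np

retrieval_vector_size = 8
doc_embeddings = np.stack(
    [np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)]
)  # docs "0" and "1", as in get_dummy_dataset
queries = np.array(
    [np.ones(retrieval_vector_size), -np.ones(retrieval_vector_size)], dtype=np.float32
)  # the hidden_states used throughout the retrieve tests

scores = queries @ doc_embeddings.T  # shape (2 queries, 2 docs)
best_doc = scores.argmax(axis=-1)    # -> array([1, 0])

assert best_doc.tolist() == [1, 0]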
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rag/test_modeling_tf_rag.py
from __future__ import annotations import json import os import shutil import tempfile import unittest from unittest.mock import patch import numpy as np from transformers import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property, is_datasets_available, is_faiss_available, is_tf_available if is_tf_available() and is_datasets_available() and is_faiss_available(): import faiss import tensorflow as tf from datasets import Dataset from transformers import ( AutoConfig, RagConfig, RagRetriever, RagTokenizer, TFAutoModel, TFAutoModelForSeq2SeqLM, TFRagModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) from transformers.modeling_tf_outputs import TFBaseModelOutput from ..bart.test_modeling_tf_bart import TFBartModelTester from ..dpr.test_modeling_tf_dpr import TFDPRModelTester TOLERANCE = 1e-3 def require_retrieval(test_case): """ Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with [`RagRetriever`]. These tests are skipped when respective libraries are not installed. """ if not (is_tf_available() and is_datasets_available() and is_faiss_available()): test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case) return test_case @require_tf @require_retrieval @require_sentencepiece class TFRagTestMixin: all_model_classes = ( (TFRagModel, TFRagTokenForGeneration, TFRagSequenceForGeneration) if is_tf_available() and is_datasets_available() and is_faiss_available() else () ) all_generative_model_classes = ( (TFRagTokenForGeneration, TFRagSequenceForGeneration) if is_tf_available() and is_datasets_available() and is_faiss_available() else () ) retrieval_vector_size = 32 n_docs = 3 max_combined_length = 16 def setUp(self): self.tmpdirname = tempfile.mkdtemp() # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) @cached_property def dpr_tokenizer(self) -> 
DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) @cached_property def bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) def get_retriever(self, config): dataset = Dataset.from_dict( { "id": ["0", "1", "3"], "text": ["foo", "bar", "qux"], "title": ["Foo", "Bar", "Qux"], "embeddings": [ np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size), 3 * np.ones(self.retrieval_vector_size), ], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) tokenizer = self.bart_tokenizer with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.dpr_tokenizer, generator_tokenizer=tokenizer, ) return retriever def check_model_with_retriever( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_model_classes: model = model_class(config, retriever=self.get_retriever(config)) self.assertTrue(model.config.is_encoder_decoder) outputs = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def check_model_generate_from_context_input_ids( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for i, model_class in enumerate(self.all_generative_model_classes): model = model_class(config) self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.numpy(), prefix=config.generator.prefix, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True), axis=[1], ) outputs = model.generate( context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, ) self.assertIsNotNone(outputs) def check_model_generate( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_generative_model_classes: model = model_class(config, retriever=self.get_retriever(config)) self.assertTrue(model.config.is_encoder_decoder) input_ids = 
tf.cast(input_ids, tf.int32) outputs = model.generate( input_ids=input_ids, num_beams=2, num_return_sequences=2, decoder_start_token_id=config.generator.eos_token_id, max_new_tokens=5, ) self.assertIsNotNone(outputs) def check_model_without_retriever( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config) self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.numpy(), prefix=config.generator.prefix, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True), axis=[1], ) outputs = model( input_ids=None, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def check_model_custom_n_docs( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config) self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.numpy(), prefix=config.generator.prefix, return_tensors="tf", n_docs=n_docs, ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True), axis=[1], ) outputs = model( input_ids=None, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=n_docs, ) # logits self.assertEqual( outputs.logits.shape, (n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs)) def 
check_model_with_mismatch_n_docs_value( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, retriever_n_docs, generator_n_docs, **kwargs, ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config) self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.numpy(), prefix=config.generator.prefix, return_tensors="tf", n_docs=retriever_n_docs, ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True), axis=[1], ) self.assertRaises( AssertionError, model.__call__, input_ids=None, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=generator_n_docs, ) def check_model_with_encoder_outputs( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_model_classes: model = model_class(config, retriever=self.get_retriever(config)) self.assertTrue(model.config.is_encoder_decoder) outputs = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) encoder_outputs = TFBaseModelOutput(outputs.generator_enc_last_hidden_state) # run only generator outputs = model( input_ids=None, encoder_outputs=encoder_outputs, doc_scores=outputs.doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def test_model_with_retriever(self): inputs_dict = self.config_and_inputs self.check_model_with_retriever(**inputs_dict) def test_model_without_retriever(self): inputs_dict = self.config_and_inputs self.check_model_without_retriever(**inputs_dict) @slow def test_model_generate_from_context_input_ids(self): inputs_dict = self.config_and_inputs self.check_model_generate_from_context_input_ids(**inputs_dict) def test_model_with_encoder_outputs(self): inputs_dict = self.config_and_inputs self.check_model_with_encoder_outputs(**inputs_dict) @slow def test_model_generate(self): inputs_dict = self.config_and_inputs self.check_model_generate(**inputs_dict) def test_model_with_custom_n_docs(self): inputs_dict = self.config_and_inputs inputs_dict["n_docs"] = 1 self.check_model_custom_n_docs(**inputs_dict) def test_model_with_mismatch_n_docs_value(self): inputs_dict = self.config_and_inputs inputs_dict["retriever_n_docs"] = 3 inputs_dict["generator_n_docs"] = 2 
self.check_model_with_mismatch_n_docs_value(**inputs_dict) @require_tf @require_retrieval class TFRagDPRBartTest(TFRagTestMixin, unittest.TestCase): @cached_property def config_and_inputs(self): question_encoder_tester = TFDPRModelTester(self) dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs() generator_tester = TFBartModelTester(self) bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common() (question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs (generator_config, bart_inputs_dict) = bart_config_and_inputs decoder_input_ids, decoder_attention_mask = bart_inputs_dict["input_ids"], bart_inputs_dict["attention_mask"] config = RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, n_docs=self.n_docs, retrieval_vector_size=self.retrieval_vector_size, max_combined_length=self.max_combined_length, ) return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } @require_tf @require_retrieval @require_sentencepiece @require_tokenizers class TFRagModelIntegrationTests(unittest.TestCase): @cached_property def token_model(self): return TFRagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn" ) @cached_property def sequence_model(self): return TFRagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn" ) def token_model_nq_checkpoint(self, retriever): return TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) def get_rag_config(self): question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn") return RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, dataset="wiki_dpr", dataset_split="train", index_name="exact", index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8, ) @slow def test_rag_sequence_inference(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_sequence = self.sequence_model rag_sequence.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids output = rag_sequence( input_ids, labels=decoder_input_ids, ) expected_shape = tf.TensorShape([5, 5, 50264]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]) expected_loss = tf.convert_to_tensor([36.7368]) tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3) tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3) 
@slow def test_rag_token_inference(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_token = self.token_model rag_token.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids output = rag_token( input_ids, labels=decoder_input_ids, ) expected_shape = tf.TensorShape([5, 5, 50264]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]) expected_loss = tf.convert_to_tensor([36.3557]) tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3) tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3) @slow def test_rag_token_inference_nq_checkpoint(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_token = self.token_model_nq_checkpoint(retriever=rag_retriever) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: rag_token.save_pretrained(tmpdirname) rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids output = rag_token( input_ids, labels=decoder_input_ids, ) expected_shape = tf.TensorShape([5, 5, 50265]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = tf.convert_to_tensor([[62.9402, 62.7107, 62.2382, 62.1194, 61.8578]]) expected_loss = tf.convert_to_tensor([32.521812]) tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3) tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3) @slow def test_rag_token_inference_save_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_token = self.token_model rag_token.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids # model must run once to be functional before loading/saving works rag_token( input_ids, labels=decoder_input_ids, ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: rag_token.save_pretrained(tmpdirname) rag_token = 
TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever) output = rag_token( input_ids, labels=decoder_input_ids, ) expected_shape = tf.TensorShape([5, 5, 50264]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]) expected_loss = tf.convert_to_tensor([36.3557]) tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3) tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3) @slow def test_init_and_from_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_config = RagConfig.from_pretrained("facebook/rag-sequence-base") rag = TFRagTokenForGeneration(rag_config, retriever=rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids rag( input_ids, decoder_input_ids=decoder_input_ids, ) # this should not give any warnings with tempfile.TemporaryDirectory() as tmpdirname: rag.save_pretrained(tmpdirname) rag = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever) @property def test_data_questions(self): return [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", ] @slow def test_rag_token_greedy_search(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) # check first two questions input_dict = tokenizer( self.test_data_questions[:2], return_tensors="tf", padding=True, truncation=True, ) input_ids = input_dict.input_ids attention_mask = input_dict.attention_mask # make sure only 1 beam is used rag_token.config.num_beams = 1 output_ids = rag_token.generate( input_ids, attention_mask=attention_mask, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " september 22, 2017", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @slow def test_rag_token_generate_batch(self): # NOTE: gold labels comes from num_beam=4, so this is effectively beam-search test tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) input_dict = tokenizer( self.test_data_questions, return_tensors="tf", padding=True, truncation=True, ) input_ids = input_dict.input_ids attention_mask = input_dict.attention_mask EXPECTED_OUTPUTS = [ " albert einstein", " september 22, 2017", " amplitude modulation", " stefan persson", " 
april 20, 2018", " the 1970s", " 7.1. 2", " 13", ] # Split into 2 batches of 4 examples to avoid GPU OOM. output_ids = rag_token.generate( input_ids[:4], attention_mask=attention_mask[:4], ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(outputs, EXPECTED_OUTPUTS[:4]) output_ids = rag_token.generate( input_ids[4:], attention_mask=attention_mask[4:], ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(outputs, EXPECTED_OUTPUTS[4:]) @slow def test_rag_sequence_generate_batch(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever) input_dict = tokenizer( self.test_data_questions, return_tensors="tf", padding=True, truncation=True, ) input_ids = input_dict.input_ids attention_mask = input_dict.attention_mask output_ids = rag_sequence.generate( input_ids, attention_mask=attention_mask, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " june 22, 2018", " amplitude modulation", " tim besley ( chairman )", " june 20, 2018", " 1980", " 7.0", " 8", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @slow def test_rag_sequence_generate_batch_from_context_input_ids(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever) input_dict = tokenizer( self.test_data_questions, return_tensors="tf", padding=True, truncation=True, ) input_ids = input_dict.input_ids question_hidden_states = rag_sequence.question_encoder(input_ids)[0] docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_hidden_states, axis=[1]), docs_dict["retrieved_doc_embeds"], transpose_b=True ), axis=[1], ) output_ids = rag_sequence.generate( context_input_ids=docs_dict["context_input_ids"], context_attention_mask=docs_dict["context_attention_mask"], doc_scores=doc_scores, do_deduplication=True, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " june 22, 2018", " amplitude modulation", " tim besley ( chairman )", " june 20, 2018", " 1980", " 7.0", " 8", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @require_tf @require_retrieval class TFRagModelSaveLoadTests(unittest.TestCase): def get_rag_config(self): question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn") return RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, dataset="wiki_dpr", dataset_split="train", index_name="exact", index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8, ) @slow def test_rag_sequence_from_pretrained(self): load_weight_prefix = "tf_rag_model_1" rag_config = self.get_rag_config() rag_decoder_tokenizer = 
BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: rag_sequence = TFRagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, ) # check that the from pretrained methods work rag_sequence.save_pretrained(tmp_dirname) rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever) output = rag_sequence(input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del rag_sequence question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = TFAutoModelForSeq2SeqLM.from_pretrained( "facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator" ) rag_sequence = TFRagSequenceForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) output = rag_sequence(input_ids, labels=decoder_input_ids) loss_init = output.loss self.assertAlmostEqual(loss_pretrained, loss_init, places=4) @slow def test_rag_token_from_pretrained(self): load_weight_prefix = "tf_rag_model_1" rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="tf" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids with tempfile.TemporaryDirectory() as tmp_dirname: rag_token = TFRagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, ) # check that the from pretrained methods work rag_token.save_pretrained(tmp_dirname) rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever) output = rag_token(input_ids, labels=decoder_input_ids) loss_pretrained = output.loss del rag_token question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = TFAutoModelForSeq2SeqLM.from_pretrained( "facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator" ) rag_token = TFRagTokenForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) output = rag_token(input_ids, labels=decoder_input_ids) loss_init = output.loss self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
0
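Both the TF tests above and the PyTorch tests that follow compute doc_scores with the same batched matmul: a (batch, hidden) question encoding against (batch, n_docs, hidden) retrieved embeddings gives a (batch, n_docs) score matrix, which is what the doc_scores.shape assertions check. The following is a framework-neutral sketch of that shape arithmetic; the sizes are illustrative and match the test mixins only by convention.

# Shape sketch of the doc_scores computation used throughout the RAG model tests.
import numpy as np

batch, n_docs, hidden = 2, 3, 32  # illustrative sizes
question_hidden_states = np.random.randn(batch, hidden).astype(np.float32)
retrieved_doc_embeds = np.random.randn(batch, n_docs, hidden).astype(np.float32)

# (batch, 1, hidden) @ (batch, hidden, n_docs) -> (batch, 1, n_docs) -> (batch, n_docs)
doc_scores = np.matmul(
    question_hidden_states[:, None, :], retrieved_doc_embeds.transpose(0, 2, 1)
).squeeze(1)

assert doc_scores.shape == (batch, n_docs)
# equivalent to np.einsum("bh,bnh->bn", question_hidden_states, retrieved_doc_embeds)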
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rag/test_modeling_rag.py
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import json import os import shutil import tempfile import unittest from unittest.mock import patch import numpy as np from transformers import BartTokenizer, T5Tokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_torch_non_multi_gpu, slow, torch_device, ) from transformers.utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available from ..bart.test_modeling_bart import BartModelTester from ..dpr.test_modeling_dpr import DPRModelTester from ..t5.test_modeling_t5 import T5ModelTester TOLERANCE = 1e-3 T5_SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available() and is_datasets_available() and is_faiss_available(): import faiss import torch from datasets import Dataset from transformers import ( AutoConfig, AutoModel, AutoModelForSeq2SeqLM, DPRContextEncoder, RagConfig, RagModel, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration, RagTokenizer, ) from transformers.modeling_outputs import BaseModelOutput def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def require_retrieval(test_case): """ Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with [`RagRetriever`]. These tests are skipped when respective libraries are not installed. 
""" if not (is_torch_available() and is_datasets_available() and is_faiss_available()): test_case = unittest.skip("test requires PyTorch, datasets and faiss")(test_case) return test_case @require_torch @require_retrieval @require_sentencepiece class RagTestMixin: all_model_classes = ( (RagModel, RagTokenForGeneration, RagSequenceForGeneration) if is_torch_available() and is_datasets_available() and is_faiss_available() else () ) retrieval_vector_size = 32 n_docs = 3 max_combined_length = 16 def setUp(self): self.tmpdirname = tempfile.mkdtemp() # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) t5_tokenizer = T5Tokenizer(T5_SAMPLE_VOCAB) t5_tokenizer_path = os.path.join(self.tmpdirname, "t5_tokenizer") t5_tokenizer.save_pretrained(t5_tokenizer_path) @cached_property def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) @cached_property def dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) @cached_property def bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) @cached_property def t5_tokenizer(self) -> BartTokenizer: return T5Tokenizer.from_pretrained(os.path.join(self.tmpdirname, "t5_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def get_retriever(self, config): dataset = Dataset.from_dict( { "id": ["0", "1", "3"], "text": ["foo", "bar", "qux"], "title": ["Foo", "Bar", "Qux"], "embeddings": [ np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size), 3 * np.ones(self.retrieval_vector_size), ], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) tokenizer = self.bart_tokenizer if config.generator.model_type == "bart" else self.t5_tokenizer with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.dpr_tokenizer, 
generator_tokenizer=tokenizer, ) return retriever def check_model_with_retriever( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_model_classes: model = model_class(config, retriever=self.get_retriever(config)).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) outputs = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def check_model_with_end2end_retriever( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) context_encoder_tokenizer = self.dpr_ctx_encoder_tokenizer dpr_context_encoder = DPRContextEncoder(config.question_encoder) # dpr is a twin tower retriever = self.get_retriever(config) retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer) # setting the ctx_encoder_tokenizer. for model_class in [RagTokenForGeneration, RagSequenceForGeneration]: model = model_class(config, retriever=retriever) model.set_context_encoder_for_training(dpr_context_encoder) # set the context_encoder for training model.to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) outputs = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def check_model_generate_from_context_input_ids( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # cast retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = 
torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) outputs = model.generate( context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, do_deduplication=True, ) self.assertIsNotNone(outputs) def check_model_generate( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_model_classes[1:]: model = model_class(config, retriever=self.get_retriever(config)).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) outputs = model.generate( input_ids=input_ids, num_beams=2, num_return_sequences=2, decoder_start_token_id=config.generator.eos_token_id, ) self.assertIsNotNone(outputs) def check_model_without_retriever( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # cast retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) outputs = model( context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def check_model_custom_n_docs( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors="pt", n_docs=n_docs, ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # cast retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) 
context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) outputs = model( context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=n_docs, ) # logits self.assertEqual( outputs.logits.shape, (n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) # doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs)) def check_model_with_mismatch_n_docs_value( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, retriever_n_docs, generator_n_docs, **kwargs, ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) retriever = self.get_retriever(config) for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0] out = retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors="pt", n_docs=retriever_n_docs, ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # cast retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) self.assertRaises( AssertionError, model.__call__, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=generator_n_docs, ) def check_model_with_encoder_outputs( self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs ): self.assertIsNotNone(config.question_encoder) self.assertIsNotNone(config.generator) for model_class in self.all_model_classes: model = model_class(config, retriever=self.get_retriever(config)).to(torch_device) model.eval() self.assertTrue(model.config.is_encoder_decoder) outputs = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) encoder_outputs = BaseModelOutput(outputs.generator_enc_last_hidden_state) # run only generator outputs = model( encoder_outputs=encoder_outputs, doc_scores=outputs.doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # logits self.assertEqual( outputs.logits.shape, (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size), ) # generator encoder last hidden states self.assertEqual( outputs.generator_enc_last_hidden_state.shape, (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size), ) 
# doc scores self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs)) def test_model_with_retriever(self): inputs_dict = self.config_and_inputs self.check_model_with_retriever(**inputs_dict) def test_model_with_end2end_retriever(self): inputs_dict = self.config_and_inputs self.check_model_with_end2end_retriever(**inputs_dict) def test_model_without_retriever(self): inputs_dict = self.config_and_inputs self.check_model_without_retriever(**inputs_dict) def test_model_with_encoder_outputs(self): inputs_dict = self.config_and_inputs self.check_model_with_encoder_outputs(**inputs_dict) def test_model_generate(self): inputs_dict = self.config_and_inputs self.check_model_generate(**inputs_dict) def test_model_with_custom_n_docs(self): inputs_dict = self.config_and_inputs inputs_dict["n_docs"] = 1 self.check_model_custom_n_docs(**inputs_dict) def test_model_with_mismatch_n_docs_value(self): inputs_dict = self.config_and_inputs inputs_dict["retriever_n_docs"] = 3 inputs_dict["generator_n_docs"] = 2 self.check_model_with_mismatch_n_docs_value(**inputs_dict) @require_torch @require_retrieval class RagDPRBartTest(RagTestMixin, unittest.TestCase): @cached_property def config_and_inputs(self): question_encoder_tester = DPRModelTester(self) dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs() generator_tester = BartModelTester(self) bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common() (question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs (generator_config, bart_inputs_dict) = bart_config_and_inputs decoder_input_ids, decoder_attention_mask = bart_inputs_dict["input_ids"], bart_inputs_dict["attention_mask"] config = RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, n_docs=self.n_docs, retrieval_vector_size=self.retrieval_vector_size, max_combined_length=self.max_combined_length, ) return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } @require_torch @require_retrieval class RagDPRT5Test(RagTestMixin, unittest.TestCase): @cached_property def config_and_inputs(self): question_encoder_tester = DPRModelTester(self) dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs() generator_tester = T5ModelTester(self, vocab_size=1100) t5_config_and_inputs = generator_tester.prepare_config_and_inputs() (question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs (generator_config, _, decoder_input_ids, _, decoder_attention_mask, _) = t5_config_and_inputs config = RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, n_docs=self.n_docs, retrieval_vector_size=self.retrieval_vector_size, max_combined_length=self.max_combined_length, ) return { "config": config, "input_ids": input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } @require_torch @require_retrieval @require_sentencepiece @require_tokenizers @require_torch_non_multi_gpu class RagModelIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @cached_property def sequence_model(self): return ( RagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", 
"facebook/bart-large-cnn" ) .to(torch_device) .eval() ) @cached_property def token_model(self): return ( RagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn" ) .to(torch_device) .eval() ) def get_rag_config(self): question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn") return RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, dataset="wiki_dpr", dataset_split="train", index_name="exact", index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8, ) @slow def test_rag_sequence_inference(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_sequence = self.sequence_model rag_sequence.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = decoder_input_ids.to(torch_device) with torch.no_grad(): output = rag_sequence( input_ids, labels=decoder_input_ids, ) expected_shape = torch.Size([5, 5, 50264]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device) _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE) expected_loss = torch.tensor([36.7368]).to(torch_device) _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE) @slow def test_rag_token_inference(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_token = self.token_model rag_token.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = decoder_input_ids.to(torch_device) with torch.no_grad(): output = rag_token( input_ids, labels=decoder_input_ids, ) expected_shape = torch.Size([5, 5, 50264]) self.assertEqual(output.logits.shape, expected_shape) expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device) _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE) expected_loss = torch.tensor([36.3557]).to(torch_device) _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE) @slow def test_rag_token_generate_beam(self): rag_config = 
self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_token = self.token_model rag_token.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids input_ids = input_ids.to(torch_device) output_ids = rag_token.generate( input_ids, decoder_start_token_id=rag_token.generator.config.decoder_start_token_id, num_beams=2, num_return_sequences=2, ) # sequence generate test output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True) output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True) # Expected outputs as given by model at integration time. EXPECTED_OUTPUT_TEXT_1 = "\"She's My Kind of Girl" EXPECTED_OUTPUT_TEXT_2 = "\"She's My Kind of Love" self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1) self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2) @slow def test_rag_sequence_generate_beam(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) rag_sequence = self.sequence_model rag_sequence.set_retriever(rag_retriever) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids input_ids = input_ids.to(torch_device) output_ids = rag_sequence.generate( input_ids, decoder_start_token_id=rag_sequence.generator.config.decoder_start_token_id, num_beams=2, num_return_sequences=2, ) # sequence generate test output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True) output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True) # Expected outputs as given by model at integration time. EXPECTED_OUTPUT_TEXT_1 = """\"She's My Kind of Girl\" was released through Epic Records in Japan in March 1972, giving the duo a Top 10 hit. Two more singles were released in Japan, \"En Carousel\" and \"Love Has Its Ways\" Ulvaeus and Andersson persevered with their songwriting and experimented with new sounds and vocal arrangements.""" EXPECTED_OUTPUT_TEXT_2 = """In September 2018, Björn Ulvaeus revealed that the two new songs, \"I Still Have Faith In You\" and \"Don't Shut Me Down\", would be released no earlier than March 2019. 
The two new tracks will feature in a TV special set to air later in the year.""" self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1) self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2) @property def test_data_questions(self): return [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", ] @slow def test_rag_sequence_generate_batch(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to( torch_device ) input_dict = tokenizer( self.test_data_questions, return_tensors="pt", padding=True, truncation=True, ) input_ids = input_dict.input_ids.to(torch_device) attention_mask = input_dict.attention_mask.to(torch_device) output_ids = rag_sequence.generate( input_ids, attention_mask=attention_mask, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " june 22, 2018", " amplitude modulation", " tim besley ( chairman )", " june 20, 2018", " 1980", " 7.0", " 8", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @slow def test_rag_sequence_generate_batch_from_context_input_ids(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") retriever = RagRetriever.from_pretrained( "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ) rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to( torch_device ) input_dict = tokenizer( self.test_data_questions, return_tensors="pt", padding=True, truncation=True, ) input_ids = input_dict.input_ids.to(torch_device) attention_mask = input_dict.attention_mask.to(torch_device) question_hidden_states = rag_sequence.question_encoder(input_ids, attention_mask=attention_mask)[0] docs_dict = retriever( input_ids.cpu().detach().numpy(), question_hidden_states.cpu().detach().numpy(), return_tensors="pt" ) doc_scores = torch.bmm( question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].to(torch_device).float().transpose(1, 2), ).squeeze(1) output_ids = rag_sequence.generate( context_input_ids=docs_dict["context_input_ids"].to(torch_device), context_attention_mask=docs_dict["context_attention_mask"].to(torch_device), doc_scores=doc_scores.to(torch_device), do_deduplication=True, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " june 22, 2018", " amplitude modulation", " tim besley ( chairman )", " june 20, 2018", " 1980", " 7.0", " 8", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @slow def test_rag_token_generate_batch(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) rag_token = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever).to( torch_device ) if torch_device == "cuda": rag_token.half() input_dict = tokenizer( self.test_data_questions, return_tensors="pt", padding=True, truncation=True, ) 
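# the questions are tokenized as one padded batch; the input ids and attention mask are moved to the test device before calling generate() with the retriever attached.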
input_ids = input_dict.input_ids.to(torch_device) attention_mask = input_dict.attention_mask.to(torch_device) output_ids = rag_token.generate( input_ids, attention_mask=attention_mask, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) EXPECTED_OUTPUTS = [ " albert einstein", " september 22, 2017", " amplitude modulation", " stefan persson", " april 20, 2018", " the 1970s", " 7.1. 2", " 13", ] self.assertListEqual(outputs, EXPECTED_OUTPUTS) @require_torch @require_retrieval class RagModelSaveLoadTests(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def get_rag_config(self): question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn") return RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, dataset="wiki_dpr", dataset_split="train", index_name="exact", index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8, ) @slow def test_rag_sequence_from_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = decoder_input_ids.to(torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, ).to(torch_device) # check that the from pretrained methods work rag_sequence.save_pretrained(tmp_dirname) rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever) rag_sequence.to(torch_device) with torch.no_grad(): output = rag_sequence( input_ids, labels=decoder_input_ids, ) loss_pretrained = output.loss del rag_sequence question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn") rag_sequence = RagSequenceForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) rag_sequence.to(torch_device) with torch.no_grad(): output = rag_sequence( input_ids, labels=decoder_input_ids, ) loss_init = output.loss self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4) @slow def test_rag_token_from_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, 
question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = decoder_input_ids.to(torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, question_encoder_max_length=200, generator_max_length=200, ).to(torch_device) # check that the from pretrained methods work rag_token.save_pretrained(tmp_dirname) rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever) rag_token.to(torch_device) self.assertTrue(rag_token.question_encoder.config.max_length == 200) self.assertTrue(rag_token.generator.config.max_length == 200) with torch.no_grad(): output = rag_token( input_ids, labels=decoder_input_ids, ) loss_pretrained = output.loss del rag_token question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn") rag_token = RagTokenForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) rag_token.to(torch_device) with torch.no_grad(): output = rag_token( input_ids, labels=decoder_input_ids, ) loss_init = output.loss self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/videomae/test_modeling_videomae.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VideoMAE model. """ import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class VideoMAEModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.tubelet_size = tubelet_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VideoMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, 
num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, ) def create_and_check_model(self, config, pixel_values, labels): model = VideoMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = VideoMAEForPreTraining(config) model.to(torch_device) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.num_masks,)) mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.batch_size, -1).bool() result = model(pixel_values, bool_masked_pos) # model only returns predictions for masked patches num_masked_patches = mask.sum().item() decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VideoMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.model_tester.num_masks,)) mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool() inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device) if return_labels: if model_class in [ *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = VideoMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: pass else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_len = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), 
self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VideoMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], 
expected_slice, atol=1e-4)) @slow def test_inference_for_pretraining(self): model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # add boolean mask, indicating which patches to mask local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `True`) expected_loss = torch.tensor([0.5142], device=torch_device) self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `False`) model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to( torch_device ) with torch.no_grad(): outputs = model(**inputs) expected_loss = torch.tensor([0.6469], device=torch_device) self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/videomae/test_image_processing_videomae.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VideoMAEImageProcessor class VideoMAEImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class VideoMAEImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = VideoMAEImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = 
self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bert_japanese/test_tokenization_bert_japanese.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BertJapaneseTokenizer test_rust_tokenizer = False space_between_special_tokens = True def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、世界。" output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。") self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) def test_pickle_mecab_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) def test_mecab_tokenizer_ipadic(self): tokenizer = MecabTokenizer(mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", 
"れ", "た", "。"], ) def test_mecab_tokenizer_unidic_lite(self): try: tokenizer = MecabTokenizer(mecab_dic="unidic_lite") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_unidic(self): try: tokenizer = MecabTokenizer(mecab_dic="unidic") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_lower(self): tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_with_option(self): try: tokenizer = MecabTokenizer( do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], ) def test_mecab_tokenizer_no_normalize(self): tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], ) @require_sudachi def test_pickle_sudachi_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) @require_sudachi def test_sudachi_tokenizer_core(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], # fmt: on ) @require_sudachi def test_sudachi_tokenizer_split_mode_A(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"]) @require_sudachi def test_sudachi_tokenizer_split_mode_B(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"]) @require_sudachi def test_sudachi_tokenizer_split_mode_C(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"]) @require_sudachi def test_sudachi_tokenizer_lower(self): tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], # fmt: on ) @require_sudachi def 
test_sudachi_tokenizer_no_normalize(self): tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], # fmt: on ) @require_sudachi def test_sudachi_tokenizer_trim_whitespace(self): tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) @require_jumanpp def test_pickle_jumanpp_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp") self.assertIsNotNone(tokenizer) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) tokens_loaded = tokenizer_new.tokenize(text) self.assertListEqual(tokens, tokens_loaded) @require_jumanpp def test_jumanpp_tokenizer(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], # fmt: on ) @require_jumanpp def test_jumanpp_tokenizer_lower(self): tokenizer = JumanppTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], # fmt: on ) @require_jumanpp def test_jumanpp_tokenizer_no_normalize(self): tokenizer = JumanppTokenizer(normalize_text=False) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), # fmt: off ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], # fmt: on ) @require_jumanpp def test_jumanpp_tokenizer_trim_whitespace(self): tokenizer = JumanppTokenizer(trim_whitespace=True) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], ) @require_jumanpp def test_jumanpp_tokenizer_ext(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"]) self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"]) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"]) def test_sentencepiece_tokenizer(self): tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp") subword_tokenizer = tokenizer.subword_tokenizer tokens = 
subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。") self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"]) tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは") self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"]) def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese") text = tokenizer.encode("ありがとう。", add_special_tokens=False) text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_2 + [3] @custom_tokenizers class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BertJapaneseTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、世界。" output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" return input_text, output_text def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character") tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。") self.assertListEqual( tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def test_character_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"]) self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"]) def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char") text = tokenizer.encode("ありがとう。", add_special_tokens=False) text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_2 + [3] @custom_tokenizers class AutoTokenizerCustomTest(unittest.TestCase): def test_tokenizer_bert_japanese(self): EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese" tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID) self.assertIsInstance(tokenizer, 
BertJapaneseTokenizer) class BertTokenizerMismatchTest(unittest.TestCase): def test_tokenizer_mismatch_warning(self): EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese" with self.assertLogs("transformers", level="WARNING") as cm: BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) ) EXAMPLE_BERT_ID = "bert-base-cased" with self.assertLogs("transformers", level="WARNING") as cm: BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) )
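# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not the library implementation): the wordpiece
# tests above rely on greedy longest-match-first lookup with a "##" prefix for
# word-internal pieces.  A minimal self-contained version, assuming plain
# whitespace pre-tokenization, reproduces the expectations in
# test_wordpiece_tokenizer (e.g. "こんばんは" -> ["こん", "##ばんは"]).
def wordpiece_tokenize_sketch(text, vocab, unk_token="[UNK]", max_chars_per_word=100):
    output_tokens = []
    for word in text.split():
        if len(word) > max_chars_per_word:
            output_tokens.append(unk_token)
            continue
        start, sub_tokens, is_bad = 0, [], False
        while start < len(word):
            end, cur_substr = len(word), None
            # try the longest possible piece first and shrink until a vocab hit
            while start < end:
                piece = ("##" if start > 0 else "") + word[start:end]
                if piece in vocab:
                    cur_substr = piece
                    break
                end -= 1
            if cur_substr is None:
                is_bad = True
                break
            sub_tokens.append(cur_substr)
            start = end
        output_tokens.extend([unk_token] if is_bad else sub_tokens)
    return output_tokens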
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/data2vec/test_modeling_data2vec_text.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Data2VecAudio model. """ import unittest from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from transformers import Data2VecTextConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextModel, ) from transformers.models.data2vec.modeling_data2vec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, Data2VecTextForTextEmbeddings, create_position_ids_from_input_ids, ) class Data2VecTextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = 
ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Data2VecTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Data2VecTextModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Data2VecTextModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Data2VecTextForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Data2VecTextForCausalLM(config=config).to(torch_device).eval() # make sure that ids don't start with pad token mask = 
input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) # make sure that ids don't start with pad token mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Data2VecTextForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = Data2VecTextForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = Data2VecTextForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels ): model = Data2VecTextForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Data2VecTextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextModel, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (Data2VecTextForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Data2VecTextModel, "fill-mask": Data2VecTextForMaskedLM, "question-answering": Data2VecTextForQuestionAnswering, "text-classification": Data2VecTextForSequenceClassification, "text-generation": Data2VecTextForCausalLM, "token-classification": Data2VecTextForTokenClassification, "zero-shot": Data2VecTextForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = Data2VecTextModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Data2VecTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_create_position_ids_respects_padding_index(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = Data2VecTextForTextEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): """Ensure that the default position ids only assign a sequential . This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = Data2VecTextForTextEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) @require_torch class Data2VecTextModelIntegrationTest(TestCasePlus): @slow def test_inference_masked_lm(self): model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 50265)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. 
expected_slice = torch.tensor([[[0.2328, 0.0000, 1.1710], [2.2525, 0.0000, 1.9937], [2.1280, 0.0000, 1.8691]]]) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_no_head(self): model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] # compare the actual values for a slice. expected_slice = torch.tensor( [[[0.1998, -0.0379, 0.0024], [-0.0971, -0.2214, -0.1798], [-0.0789, -0.2400, -0.1898]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
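# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not the library implementation): the two
# position-id regression tests above assume that non-padding tokens receive
# sequential positions starting at padding_idx + 1 while padding tokens keep
# padding_idx itself.  A minimal version of that rule:
import torch

def create_position_ids_sketch(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    # running count of non-padding tokens, zeroed out again at the padding slots
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

# With padding_idx = 1 (an assumption for illustration), [[12, 31, 13, 1]]
# maps to [[2, 3, 4, 1]], matching test_create_position_ids_respects_padding_index.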
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/data2vec/test_modeling_data2vec_audio.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Data2VecAudio model. """ import math import unittest import numpy as np from datasets import load_dataset from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from transformers import Data2VecAudioConfig, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Data2VecAudioForAudioFrameClassification, Data2VecAudioForCTC, Data2VecAudioForSequenceClassification, Data2VecAudioForXVector, Data2VecAudioModel, Wav2Vec2Processor, ) from transformers.models.data2vec.modeling_data2vec_audio import _compute_mask_indices class Data2VecAudioModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=4, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = 
int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return Data2VecAudioConfig( hidden_size=self.hidden_size, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = Data2VecAudioModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Data2VecAudioModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Data2VecAudioModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Data2VecAudioModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, 
input_values, *args): model = Data2VecAudioForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Data2VecAudioForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Data2VecAudioForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lenghts are at least # one shorter than logit lenghts to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Data2VecAudioForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): 
config.ctc_zero_infinity = True model = Data2VecAudioForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Data2VecAudioForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Data2VecAudioModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Data2VecAudioForCTC, Data2VecAudioModel, Data2VecAudioForSequenceClassification, Data2VecAudioForAudioFrameClassification, Data2VecAudioForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Data2VecAudioForSequenceClassification, "automatic-speech-recognition": Data2VecAudioForCTC, "feature-extraction": Data2VecAudioModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Data2VecAudioModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecAudioConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Data2VecAudio has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Data2VecAudio cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Data2VecAudio has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: 
module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Data2VecAudioForCTC.from_pretrained( "hf-internal-testing/tiny-random-data2vec-seq-class", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Data2VecAudioForCTC.from_pretrained( "facebook/data2vec-audio-base-960h", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 299, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Data2VecAudioModel.from_pretrained("facebook/data2vec-audio-base") self.assertIsNotNone(model) @require_torch class Data2VecAudioUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal 
for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make sure that non-padded examples cannot be padded self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) @require_torch @require_soundfile @slow class Data2VecAudioModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_normal(self): model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_batched(self): model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with thousands of spectators were trivialities not worth thinking about", "his instant of panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
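# ---------------------------------------------------------------------------
# Editor's sketch (illustrative): the audio tester above shrinks the raw
# waveform length through the convolutional feature extractor.  With no padding,
# a 1-d conv layer with kernel k and stride s maps a length L to
# floor((L - k) / s) + 1; iterating over the tester's default kernels/strides
# reproduces its `output_seq_length` (1024 samples -> 255 -> 62 -> 14 frames).
def feat_extract_output_length(input_length, conv_kernel=(8, 8, 8), conv_stride=(4, 4, 4)):
    length = input_length
    for kernel, stride in zip(conv_kernel, conv_stride):
        length = (length - kernel) // stride + 1
    return length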
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/data2vec/test_modeling_tf_data2vec_vision.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Data2VecVision model. """ from __future__ import annotations import collections.abc import inspect import unittest import numpy as np from transformers import Data2VecVisionConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, ) from transformers.models.data2vec.modeling_tf_data2vec_vision import ( TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class TFData2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = TFData2VecVisionModel(config=config) result = model(pixel_values, training=False) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = ( self.image_size if isinstance(self.image_size, collections.abc.Iterable) else (self.image_size, self.image_size) ) patch_size = ( self.patch_size if isinstance(self.image_size, collections.abc.Iterable) else (self.patch_size, self.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = TFData2VecVisionForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = TFData2VecVisionForSemanticSegmentation(config) result = model(pixel_values, training=False) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_keras_fit(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, _, _ = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "labels": tf.zeros((self.batch_size))} return config, inputs_dict @require_tf class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (TFData2VecVisionModel, TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFData2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in Data2VecVision, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] 
config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Data2VecVision has a different seq_length image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( self.model_tester.patch_size if isinstance(self.model_tester.patch_size, collections.abc.Iterable) else (self.model_tester.patch_size, self.model_tester.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Overriding this method since the base method won't be compatible with Data2VecVision. @slow def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` cannot operate with the default `fit()` method. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() label_names = {"labels"} self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = { key: val for key, val in prepared_for_class.items() if key not in label_names } self.assertGreater(len(inputs_minus_labels), 0) model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) # Overriding this method since the base method won't be compatible with Data2VecVision. def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Since `TFData2VecVisionModel` won't have labels against which we # could compute loss. 
if model_class.__name__ != "TFData2VecVisionModel": model = model_class(config) if getattr(model, "hf_compute_loss", None): # The number of elements in the loss should be the same as the number of elements in the label _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() added_label = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0] ] loss_size = tf.size(added_label) # Test that model correctly compute the loss with kwargs possible_input_names = {"input_ids", "pixel_values", "input_features"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) loss = model(model_input, **prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a dict _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit() loss = model(**prepared_for_class)[0] self.assertEqual(loss.shape, [loss_size]) # Test that model correctly compute the loss with a tuple label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertEqual(loss.shape, [loss_size]) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFData2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFData2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = tf.convert_to_tensor([1, 1000]) self.assertEqual(logits.shape, expected_shape) expected_slice = tf.convert_to_tensor([0.3277, -0.1395, 0.0911]) tf.debugging.assert_near(logits[0, :3], expected_slice, atol=1e-4) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(tf.nn.top_k(outputs.logits[0], 2).indices.numpy().tolist(), 
expected_top2)
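# --- Hedged usage sketch (not part of the test file above) ---
# Mirrors the inference pattern exercised by TFData2VecVisionModelIntegrationTest:
# image -> BeitImageProcessor -> logits -> label. Assumes TensorFlow, Pillow and
# network access for the checkpoint; "cats.jpg" is a placeholder path.
import tensorflow as tf
from PIL import Image
from transformers import BeitImageProcessor, TFData2VecVisionForImageClassification

processor = BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")

image = Image.open("cats.jpg")                      # placeholder image path
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits                     # (1, 1000) for the ImageNet-1k head
top_id = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[top_id])                # e.g. "remote control, remote"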
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/data2vec/test_modeling_data2vec_vision.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Data2VecVision model. """ import inspect import unittest from transformers import Data2VecVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation, Data2VecVisionModel, ) from transformers.models.data2vec.modeling_data2vec_vision import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class Data2VecVisionModelTester: def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ): self.parent = parent self.vocab_size = 100 self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_indices = out_indices self.num_labels = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return Data2VecVisionConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = Data2VecVisionModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (self.image_size // self.patch_size) ** 2 self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.type_sequence_label_size model = Data2VecVisionForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_for_image_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = Data2VecVisionForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (Data2VecVisionModel, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": Data2VecVisionModel, "image-classification": Data2VecVisionForImageClassification, "image-segmentation": Data2VecVisionForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Data2VecVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # Data2VecVision does not use inputs_embeds pass @require_torch_multi_gpu @unittest.skip( reason="Data2VecVision has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in [*get_values(MODEL_MAPPING)]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return config.use_cache = False config.return_dict = True for model_class in self.all_model_classes: if model_class in [*get_values(MODEL_MAPPING)] or not model_class.supports_gradient_checkpointing: continue # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING # this can then be incorporated into _prepare_for_class in test_modeling_common.py elif model_class.__name__ == "Data2VecVisionForSemanticSegmentation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.gradient_checkpointing_enable() model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) 
loss = model(**inputs).loss loss.backward() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as semseg models tend to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = Data2VecVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class Data2VecVisionModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( BeitImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k") if is_vision_available() else None ) @slow def test_inference_image_classification_head_imagenet_1k(self): model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor([0.3277, -0.1395, 0.0911]).to(torch_device) self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]] self.assertEqual(logits[0].topk(2).indices.cpu().tolist(), expected_top2)
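# --- Hedged shape sketch (mirrors create_and_check_for_image_segmentation above) ---
# Builds a tiny, randomly initialised Data2VecVisionForSemanticSegmentation with the
# same toy sizes the model tester uses; weights are untrained, so only shapes are
# meaningful. Per-pixel classes come from an argmax over the label dimension.
import torch
from transformers import Data2VecVisionConfig, Data2VecVisionForSemanticSegmentation

config = Data2VecVisionConfig(
    image_size=30, patch_size=2, num_channels=3, hidden_size=32,
    num_hidden_layers=4, num_attention_heads=4, intermediate_size=37,
    out_indices=[0, 1, 2, 3], num_labels=3,
)
model = Data2VecVisionForSemanticSegmentation(config).eval()

pixel_values = torch.randn(1, 3, 30, 30)            # toy batch of one image
with torch.no_grad():
    logits = model(pixel_values).logits             # (1, num_labels, 60, 60), as asserted above
segmentation = logits.argmax(dim=1)                 # (1, 60, 60) per-pixel class ids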
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/transfo_xl/test_modeling_transfo_xl.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest from transformers import TransfoXLConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.transfo_xl.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST class TransfoXLModelTester: def __init__( self, parent, batch_size=14, seq_length=7, mem_len=30, clamp_len=15, is_training=False, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, d_embed=32, num_attention_heads=4, d_head=8, d_inner=128, div_val=2, num_hidden_layers=5, scope=None, seed=1, eos_token_id=0, num_labels=3, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.mem_len = mem_len self.key_length = self.seq_length + self.mem_len self.clamp_len = clamp_len self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.cutoffs = cutoffs self.hidden_size = hidden_size self.d_embed = d_embed self.num_attention_heads = num_attention_heads self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.num_hidden_layers = num_hidden_layers self.scope = scope self.seed = seed self.eos_token_id = eos_token_id self.num_labels = num_labels self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return (config, input_ids_1, input_ids_2, lm_labels) def get_config(self): return TransfoXLConfig( vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLModel(config) model.to(torch_device) model.eval() outputs1 = model(input_ids_1) outputs2 = model(input_ids_2, outputs1["mems"]) outputs = { "hidden_states_1": outputs1["last_hidden_state"], "mems_1": outputs1["mems"], "hidden_states_2": outputs2["last_hidden_state"], "mems_2": outputs2["mems"], } return outputs def 
check_transfo_xl_model_output(self, result): self.parent.assertEqual(result["hidden_states_1"].shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result["hidden_states_2"].shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_1"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertListEqual( [mem.shape for mem in result["mems_2"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLLMHeadModel(config) model.to(torch_device) model.eval() lm_logits_1 = model(input_ids_1)["prediction_scores"] outputs1 = model(input_ids_1, labels=lm_labels) lm_logits_2 = model(input_ids_2, mems=outputs1["mems"])["prediction_scores"] outputs2 = model(input_ids_2, labels=lm_labels, mems=outputs1["mems"]) outputs = { "loss_1": outputs1["loss"], "losses_1": outputs1["losses"], "mems_1": outputs1["mems"], "lm_logits_1": lm_logits_1, "loss_2": outputs2["loss"], "losses_2": outputs2["losses"], "mems_2": outputs2["mems"], "lm_logits_2": lm_logits_2, } return outputs def check_transfo_xl_lm_head_output(self, result): self.parent.assertEqual(result["loss_1"].shape, ()) self.parent.assertEqual(result["losses_1"].shape, (self.batch_size, self.seq_length - 1)) self.parent.assertEqual(result["lm_logits_1"].shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_1"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertEqual(result["loss_2"].shape, ()) self.parent.assertEqual(result["losses_2"].shape, (self.batch_size, self.seq_length - 1)) self.parent.assertEqual(result["lm_logits_2"].shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_2"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_transfo_xl_lm_head_trainer_compatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): config.trainer_compatible = True model = TransfoXLLMHeadModel(config) model.to(torch_device) model.eval() lm_logits_1 = model(input_ids_1, return_dict=False)[0] outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) loss_1, _, losses_1, mems_1 = outputs1[:4] lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1, return_dict=False) loss_2, _, losses_2, mems_2 = outputs2[:4] outputs = { "losses_1": losses_1, "mems_1": mems_1, "lm_logits_1": lm_logits_1, "loss_1": loss_1, "losses_2": losses_2, "mems_2": mems_2, "lm_logits_2": lm_logits_2, "loss_2": loss_2, } config.trainer_compatible = None return outputs def create_transfo_xl_lm_head_trainer_incompatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): config.trainer_compatible = False model = TransfoXLLMHeadModel(config) model.to(torch_device) model.eval() lm_logits_1 = model(input_ids_1, return_dict=False)[0] outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) losses_1, _, mems_1 = outputs1[:3] loss_1 = outputs1[-1] lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1) losses_2, _, mems_2 = outputs2[:3] loss_2 = outputs2[-1] outputs = { "losses_1": losses_1, "mems_1": mems_1, "lm_logits_1": lm_logits_1, "loss_1": 
loss_1, "losses_2": losses_2, "mems_2": mems_2, "lm_logits_2": lm_logits_2, "loss_2": loss_2, } config.trainer_compatible = None return outputs def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): config.num_labels = self.num_labels model = TransfoXLForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids_1) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids_1} return config, inputs_dict @require_torch class TransfoXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TransfoXLModel, TransfoXLLMHeadModel, TransfoXLForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = (TransfoXLLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": TransfoXLModel, "text-classification": TransfoXLForSequenceClassification, "text-generation": TransfoXLLMHeadModel, "zero-shot": TransfoXLForSequenceClassification, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = True test_mismatched_shapes = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def check_cutoffs_and_n_token( self, copied_cutoffs, layer, model_embed, model, model_class, resized_value, vocab_size ): # Check that the cutoffs were modified accordingly for i in range(len(copied_cutoffs)): if i < layer: self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i]) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i]) if i < len(model.config.cutoffs): self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i]) else: self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i] + resized_value) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i] + resized_value) if i < len(model.config.cutoffs): self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i] + resized_value) self.assertEqual(model_embed.n_token, vocab_size + resized_value) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.n_token, vocab_size + resized_value) def setUp(self): self.model_tester = TransfoXLModelTester(self) self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) def test_config(self): self.config_tester.run_common_tests() def test_transfo_xl_model(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs) self.model_tester.check_transfo_xl_model_output(output_result) def test_transfo_xl_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs) self.model_tester.check_transfo_xl_lm_head_output(output_result) output_result = self.model_tester.create_transfo_xl_lm_head_trainer_compatible_tuple(*config_and_inputs) self.model_tester.check_transfo_xl_lm_head_output(output_result) output_result = self.model_tester.create_transfo_xl_lm_head_trainer_incompatible_tuple(*config_and_inputs) self.model_tester.check_transfo_xl_lm_head_output(output_result) def test_transfo_xl_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # xlnet cannot keep gradients in attentions or hidden states return @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): # Opt-out of this test. 
pass @slow def test_model_from_pretrained(self): for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TransfoXLModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = [emb.weight.clone() for emb in model_embed.emb_layers] # Retrieve the cutoffs and copy them copied_cutoffs = copy.copy(model_embed.cutoffs) test_layers = list(range(config.div_val)) for layer in test_layers: # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10, layer) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] + 10) # Check that the cutoffs were modified accordingly self.check_cutoffs_and_n_token( copied_cutoffs, layer, model_embed, model, model_class, 10, model_vocab_size ) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**inputs_dict) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 5, layer) self.assertEqual(model.config.vocab_size, model_vocab_size - 5) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] - 5) # Check that the cutoffs were modified accordingly self.check_cutoffs_and_n_token( copied_cutoffs, layer, model_embed, model, model_class, -5, model_vocab_size ) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 5 - 1) model(**inputs_dict) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings[layer], model_embed.emb_layers[layer].weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Reset model embeddings to original size model.resize_token_embeddings(model_vocab_size, layer) self.assertEqual(model_vocab_size, model.config.vocab_size) self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0]) def test_resize_embeddings_untied(self): # transfo-xl requires special resize for lm-head return def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length if idx == 0 else (min_length - 2) src_len = (min_length + config.mem_len) if idx == 0 else (min_length + config.mem_len - 2) expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length if idx == 0 else min_length - 2 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "cluster_weight") and module.cluster_weight is not None: module.cluster_weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "cluster_bias") and module.cluster_bias is not None: module.cluster_bias.data.fill_(3) if hasattr(module, "emb_projs"): for i in range(len(module.emb_projs)): if module.emb_projs[i] is not None: nn.init.constant_(module.emb_projs[i], 0.0003) if hasattr(module, "out_projs"): for i in range(len(module.out_projs)): if module.out_projs[i] is not None: nn.init.constant_(module.out_projs[i], 0.0003) for param in ["r_emb", "r_w_bias", "r_r_bias", "r_bias"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass @require_torch class TransfoXLModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_transfo_xl_wt103(self): model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") model.to(torch_device) # fmt: off input_ids = 
torch.tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]],dtype=torch.long,device=torch_device) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young # Grigori Rasputin is asked by his father and a group of men to perform magic. # Rasputin has a vision and denounces one of the men as a horse thief. Although # his father initially slaps him for making such an accusation, Rasputin watches # as the man is chased outside and beaten. Twenty years later, Rasputin sees a # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly # becomes famous, with people, even a bishop, begging for his blessing. In the # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit # of Heaven " output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
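# --- Hedged sketch of the mems recurrence exercised by create_transfo_xl_model ---
# Two consecutive segments go through a toy, randomly initialised TransfoXLModel;
# the mems returned for segment 1 are fed into segment 2 so attention can look back
# across the segment boundary. Config values are copied from the model tester.
import torch
from transformers import TransfoXLConfig, TransfoXLModel

config = TransfoXLConfig(
    vocab_size=99, mem_len=30, clamp_len=15, cutoffs=[10, 50, 80],
    d_model=32, d_embed=32, n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=5,
)
model = TransfoXLModel(config).eval()

segment_1 = torch.randint(0, config.vocab_size, (2, 7))    # (batch, seq_len)
segment_2 = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    out_1 = model(segment_1)                                # fresh mems
    out_2 = model(segment_2, mems=out_1.mems)               # reuse cached hidden states
# each mem has shape (mem_len, batch, d_model), one per layer, as asserted above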
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/transfo_xl/test_tokenization_transfo_xl.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = TransfoXLTokenizer test_rust_tokenizer = False test_seq2seq = False def setUp(self): super().setUp() vocab_tokens = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): kwargs["lower_case"] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "<unk> UNwanted , running" output_text = "<unk> unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True) tokens = tokenizer.tokenize("<unk> UNwanted , running") self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) def test_full_tokenizer_lower(self): tokenizer = TransfoXLTokenizer(lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"] ) def test_full_tokenizer_no_lower(self): tokenizer = TransfoXLTokenizer(lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_full_tokenizer_moses_numbers(self): tokenizer = TransfoXLTokenizer(lower_case=False) text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" tokens_out = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(text_in), tokens_out) self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in) def test_move_added_token(self): tokenizer = self.get_tokenizer() original_len = len(tokenizer) tokenizer.add_tokens(["new1", "new2"]) tokenizer.move_added_token("new1", 1) # Check that moved token is not copied (duplicate) self.assertEqual(len(tokenizer), original_len + 2) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1"), [1]) self.assertEqual(tokenizer.decode([1]), "new1")
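# --- Hedged sketch of the Moses-style tokenization round trip tested above ---
# A vocab-less TransfoXLTokenizer can still split and re-join text (sacremoses must
# be installed): numbers are broken around "@,@" / "@.@" markers and hyphens become
# "@-@", and convert_tokens_to_string reverses the split exactly, as the
# test_full_tokenizer_moses_numbers test asserts.
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer(lower_case=False)
text = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
tokens = tokenizer.tokenize(text)
# e.g. ..., "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", ...
assert tokenizer.convert_tokens_to_string(tokens) == text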
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class TFTransfoXLModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.mem_len = 30 self.key_length = self.seq_length + self.mem_len self.clamp_len = 15 self.is_training = True self.use_labels = True self.vocab_size = 99 self.cutoffs = [10, 50, 80] self.hidden_size = 32 self.d_embed = 32 self.num_attention_heads = 4 self.d_head = 8 self.d_inner = 128 self.div_val = 2 self.num_hidden_layers = 2 self.scope = None self.seed = 1 self.eos_token_id = 0 self.num_labels = 3 self.pad_token_id = self.vocab_size - 1 self.init_range = 0.01 def prepare_config_and_inputs(self): input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = TransfoXLConfig( vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, ) return (config, input_ids_1, input_ids_2, lm_labels) def set_seed(self): random.seed(self.seed) tf.random.set_seed(self.seed) def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): model = TFTransfoXLModel(config) hidden_states_1, mems_1 = model(input_ids_1).to_tuple() inputs = {"input_ids": input_ids_2, "mems": mems_1} hidden_states_2, mems_2 = model(inputs).to_tuple() self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertListEqual( [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): model = TFTransfoXLLMHeadModel(config) lm_logits_1, mems_1 = 
model(input_ids_1).to_tuple() inputs = {"input_ids": input_ids_1, "labels": lm_labels} _, mems_1 = model(inputs).to_tuple() lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple() inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels} _, mems_2 = model(inputs).to_tuple() self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): model = TFTransfoXLForSequenceClassification(config) result = model(input_ids_1) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids_1} return config, inputs_dict @require_tf class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) all_generative_model_classes = () if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented test_resize_embeddings = False test_head_masking = False test_onnx = False test_mismatched_shapes = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def setUp(self): self.model_tester = TFTransfoXLModelTester(self) self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) def test_config(self): self.config_tester.run_common_tests() def test_transfo_xl_model(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs) def test_transfo_xl_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs) def test_transfo_xl_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert name is None else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_xla_mode(self): # TODO JP: Make TransfoXL XLA compliant pass @slow def test_model_from_pretrained(self): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFTransfoXLModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.") def test_dataset_conversion(self): pass @require_tf class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase): @unittest.skip("Skip test until #12651 is resolved.") @slow def test_lm_generate_transfo_xl_wt103(self): model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") # fmt: off input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]],dtype=tf.int32) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
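# --- Hedged sketch of the TF input formats exercised above ---
# TFTransfoXLModel accepts either a bare tensor or a dict of named inputs; the mems
# returned from one call are passed back in for the next segment, mirroring
# create_and_check_transfo_xl_model. The LM-head test additionally passes a
# positional [input_ids, mems] list. Config values are copied from the TF tester.
import tensorflow as tf
from transformers import TransfoXLConfig, TFTransfoXLModel

config = TransfoXLConfig(
    vocab_size=99, mem_len=30, clamp_len=15, cutoffs=[10, 50, 80],
    d_model=32, d_embed=32, n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=2,
)
model = TFTransfoXLModel(config)

ids = tf.random.uniform((2, 7), maxval=config.vocab_size, dtype=tf.int32)
hidden_1, mems_1 = model(ids).to_tuple()                                  # bare tensor input
hidden_2, mems_2 = model({"input_ids": ids, "mems": mems_1}).to_tuple()   # dict input with cached mems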
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt_neo/test_modeling_gpt_neo.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch GPT Neo model. """ import unittest from transformers import GPTNeoConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2Tokenizer, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, ) class GPTNeoModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=4, attention_types=[[["global", "local"], 2]], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, window_size=7, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.window_size = window_size self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.attention_types = attention_types def get_large_model_config(self): return GPTNeoConfig.from_pretrained("gpt-neo-125M") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = 
None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return GPTNeoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, max_position_embeddings=self.max_position_embeddings, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, attention_types=self.attention_types, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # past_key_values is not implemented # self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gpt_neo_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, 
random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_neo_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_neo_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTNeoModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, 
input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTNeoForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_gpt_neo_for_question_answering( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_gpt_neo_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt_neo_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTNeoForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPTNeoForCausalLM(config) if gradient_checkpointing: model.gradient_checkpointing_enable() model.to(torch_device) result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class GPTNeoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GPTNeoModel, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPTNeoForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTNeoModel, "question-answering": GPTNeoForQuestionAnswering, "text-classification": GPTNeoForSequenceClassification, "text-generation": GPTNeoForCausalLM, "token-classification": GPTNeoForTokenClassification, "zero-shot": GPTNeoForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False test_model_parallel = False # special case for DoubleHeads model def 
_prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        return inputs_dict

    def setUp(self):
        self.model_tester = GPTNeoModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_gpt_neo_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model(*config_and_inputs)

    def test_gpt_neo_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_past(*config_and_inputs)

    def test_gpt_neo_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_attention_mask_past(*config_and_inputs)

    def test_gpt_neo_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_model_past_large_inputs(*config_and_inputs)

    def test_gpt_neo_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_gpt_neo_question_answering_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_question_answering(*config_and_inputs)

    def test_gpt_neo_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_sequence_classification(*config_and_inputs)

    def test_gpt_neo_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt_neo_for_token_classification(*config_and_inputs)

    def test_gpt_neo_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def _get_hidden_states(self):
        return torch.tensor(
            [
                [
                    [0.4983, -0.7584, -1.6944, 0.5440],
                    [2.6918, 0.4206, 0.4176, 0.2055],
                    [-0.0071, -0.0405, -1.4920, -0.3630],
                    [1.0492, 0.1599, -1.7648, 0.2419],
                    [-1.8348, 2.0514, -0.1946, 0.3203],
                    [0.7672, -1.1600, -1.7118, -0.9056],
                    [0.2986, 0.5372, 0.7729, -0.1927],
                    [0.0285, 0.2629, -1.1156, -1.1992],
                ]
            ],
            dtype=torch.float32,
            device=torch_device,
        )

    def test_local_attn_probs(self):
        model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)
        batch_size, seq_length, _ = hidden_states.shape
        mask_tokens = 2
        attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # don't attend last mask_tokens
        attention_mask = attention_mask.view(batch_size, -1)
        attention_mask = attention_mask[:, None, None, :]
        attention_mask = (1.0 - attention_mask) * -10000.0

        attn_probs = layer(hidden_states, attention_mask=attention_mask, output_attentions=True)[-1]

        # the last 2 tokens are masked, and should have 0 attn_probs
        self.assertTrue(torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))

        # in local attention each token can only attend to the previous window_size tokens (including itself)
        # here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
        # and the attn_probs should be 0 for tokens [0, 1]
        self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
        self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))


@require_torch
class GPTNeoModelLanguageGenerationTest(unittest.TestCase):
    @cached_property
    def model(self):
        return GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B").to(torch_device)

    @cached_property
    def tokenizer(self):
        return GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

    @slow
    def test_lm_generate_gpt_neo(self):
        for checkpointing in [True, False]:
            model = self.model
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device)  # The dog
            # fmt: off
            # The dog-eared copy of the book, which is a collection of essays by the late author,
            expected_output_ids = [464, 3290, 12, 3380, 4866, 286, 262, 1492, 11, 543, 318, 257, 4947, 286, 27126, 416, 262, 2739, 1772, 11]
            # fmt: on
            output_ids = model.generate(input_ids, do_sample=False)
            self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

    @slow
    def test_gpt_neo_sample(self):
        model = self.model
        tokenizer = self.tokenizer

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = "Today is a nice day and if you don’t get the memo here is what you can"
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = self.model
        tokenizer = self.tokenizer

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I am",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a kitty. She is a very sweet and loving",
            "Today, I am going to talk about the best way to get a job in the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GPTNeoModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/gpt_neo/test_modeling_flax_gpt_neo.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import GPT2Tokenizer, GPTNeoConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gpt_neo.modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel if is_torch_available(): import torch class FlaxGPTNeoModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, attention_types=[[["global", "local"], 2]], intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, window_size=7, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_types = attention_types self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.window_size = window_size self.initializer_range = initializer_range self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = GPTNeoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, max_position_embeddings=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, attention_types=self.attention_types, ) return (config, input_ids, input_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def 
check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4") position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, ) outputs = model(input_ids) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask): max_decoder_length = 20 model = model_class_name(config) attention_mask_cache = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, ) past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length) position_ids = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxGPTNeoModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPTNeoModel, FlaxGPTNeoForCausalLM) if is_flax_available() else () all_generative_model_classes = (FlaxGPTNeoForCausalLM,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPTNeoModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True) model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = jax.jit(model.generate) output_sequences = jit_generate( inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id 
).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. I'm going to", ] self.assertListEqual(output_string, expected_string) # overwrite from common since `attention_mask` in combination # with `causal_mask` behaves slighly differently @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2) # overwrite from common since `attention_mask` in combination # with `causal_mask` behaves slighly differently @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) batch_size, seq_length = pt_inputs["input_ids"].shape rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): 
pt_inputs["attention_mask"][batch_idx, :start_index] = 0 pt_inputs["attention_mask"][batch_idx, start_index:] = 1 prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0 prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("EleutherAI/gpt-neo-125M") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/blenderbot_small/test_tokenization_blenderbot_small.py
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Blenderbot small tokenizer."""
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/blenderbot_small/test_modeling_blenderbot_small.py
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch BlenderbotSmall model. """ import tempfile import unittest from transformers import BlenderbotSmallConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallTokenizer from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallDecoder, BlenderbotSmallEncoder, BlenderbotSmallForCausalLM, ) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class BlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id # forcing a certain token to be generated, sets all other 
tokens to -inf
        # if however the token to be generated is already at -inf then it can lead to
        # `nan` values and thus break generation
        self.forced_bos_token_id = None
        self.forced_eos_token_id = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
            3,
        )
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            forced_bos_token_id=self.forced_bos_token_id,
            forced_eos_token_id=self.forced_eos_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BlenderbotSmallModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = BlenderbotSmallModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = BlenderbotSmallEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BlenderbotSmallDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallModel, BlenderbotSmallForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": BlenderbotSmallForConditionalGeneration, "feature-extraction": BlenderbotSmallModel, "summarization": BlenderbotSmallForConditionalGeneration, "text-generation": BlenderbotSmallForCausalLM, "text2text-generation": BlenderbotSmallForConditionalGeneration, "translation": BlenderbotSmallForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = BlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() 
if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) @require_torch class Blenderbot90MIntegrationTests(unittest.TestCase): ckpt = "facebook/blenderbot-90M" @cached_property def model(self): model = BlenderbotSmallForConditionalGeneration.from_pretrained(self.ckpt).to(torch_device) if torch_device == "cuda": model = model.half() return model @cached_property def tokenizer(self): return BlenderbotSmallTokenizer.from_pretrained(self.ckpt) @slow def test_90_generation_from_long_input(self): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel" " like i'm going to throw up.\nand why is that?" ] model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device) assert isinstance(self.tokenizer, BlenderbotSmallTokenizer) generated_ids = self.model.generate(**model_inputs)[0] reply = self.tokenizer.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) assert reply in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", ) @slow def test_90_generation_from_short_input(self): model_inputs = self.tokenizer(["sam"], return_tensors="pt").to(torch_device) generated_utterances = self.model.generate(**model_inputs) clean_txt = self.tokenizer.decode( generated_utterances[0], skip_special_tokens=True, clean_up_tokenization_spaces=True ) assert clean_txt in ( "have you ever been to a sam club? it's a great club in the south.", "have you ever heard of sam harris? he's an american singer, songwriter, and actor.", ) class BlenderbotSmallStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = 
ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = BlenderbotSmallDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = 
output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class BlenderbotSmallStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BlenderbotSmallDecoder, BlenderbotSmallForCausalLM) if is_torch_available() else () all_generative_model_classes = (BlenderbotSmallForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = BlenderbotSmallStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = 
np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = 
jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size # @timeout_decorator.timeout(1) # not working with the decorator so far def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBlenderbotSmallModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot_small-90M") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs)
0