# repo_id: hf_public_repos/transformers
# file_path: hf_public_repos/transformers/tests/test_modeling_utils.py
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import glob
import json
import os
import os.path
import sys
import tempfile
import unittest
import unittest.mock as mock
import uuid
from pathlib import Path

import requests
from huggingface_hub import HfApi, HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from pytest import mark
from requests.exceptions import HTTPError

from transformers import (
    AutoConfig,
    AutoModel,
    OwlViTForObjectDetection,
    PretrainedConfig,
    is_torch_available,
    logging,
)
from transformers.testing_utils import (
    TOKEN,
    USER,
    CaptureLogger,
    TestCasePlus,
    is_flaky,
    is_staging_test,
    require_accelerate,
    require_flax,
    require_safetensors,
    require_tf,
    require_torch,
    require_torch_accelerator,
    require_torch_gpu,
    require_torch_multi_accelerator,
    require_usr_bin_time,
    slow,
    torch_device,
)
from transformers.utils import (
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
)
from transformers.utils.import_utils import (
    is_flash_attn_2_available,
    is_flax_available,
    is_tf_available,
    is_torch_sdpa_available,
    is_torchdynamo_available,
)


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig, NoSuperInitConfig  # noqa E402


if is_torch_available():
    import torch
    from safetensors.torch import save_file as safe_save_file
    from test_module.custom_modeling import CustomModel, NoSuperInitModel
    from torch import nn

    from transformers import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        AutoModelForCausalLM,
        AutoTokenizer,
        BertConfig,
        BertModel,
        CLIPTextModel,
        PreTrainedModel,
        T5Config,
        T5ForConditionalGeneration,
    )
    from transformers.modeling_attn_mask_utils import (
        AttentionMaskConverter,
        _create_4d_causal_attention_mask,
        _prepare_4d_attention_mask,
        _prepare_4d_causal_attention_mask,
    )
    from transformers.modeling_utils import shard_checkpoint

    # Fake pretrained models for tests
    class BaseModel(PreTrainedModel):
        base_model_prefix = "base"
        config_class = PretrainedConfig

        def __init__(self, config):
            super().__init__(config)
            self.linear = nn.Linear(5, 5)
            self.linear_2 = nn.Linear(5, 5)

        def forward(self, x):
            return self.linear_2(self.linear(x))

    class BaseModelWithTiedWeights(PreTrainedModel):
        config_class = PretrainedConfig

        def __init__(self, config):
            super().__init__(config)
            self.linear = nn.Linear(5, 5)
            self.linear_2 = nn.Linear(5, 5)

        def forward(self, x):
            return self.linear_2(self.linear(x))

        def tie_weights(self):
            self.linear_2.weight = self.linear.weight

    class ModelWithHead(PreTrainedModel):
        base_model_prefix = "base"
        config_class = PretrainedConfig

        def _init_weights(self, module):
            pass

        def __init__(self, config):
            super().__init__(config)
            self.base = BaseModel(config)
            # linear is a common name between Base and Head on purpose.
            self.linear = nn.Linear(5, 5)
            self.linear2 = nn.Linear(5, 5)

        def forward(self, x):
            return self.linear2(self.linear(self.base(x)))

    class ModelWithHeadAndTiedWeights(PreTrainedModel):
        base_model_prefix = "base"
        config_class = PretrainedConfig

        def _init_weights(self, module):
            pass

        def __init__(self, config):
            super().__init__(config)
            self.base = BaseModel(config)
            self.decoder = nn.Linear(5, 5)

        def forward(self, x):
            return self.decoder(self.base(x))

        def tie_weights(self):
            self.decoder.weight = self.base.linear.weight

    class Prepare4dCausalAttentionMaskModel(nn.Module):
        def forward(self, inputs_embeds):
            batch_size, seq_length, _ = inputs_embeds.shape
            past_key_values_length = 4
            attention_mask = _prepare_4d_causal_attention_mask(
                None, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )
            return attention_mask

    class Create4dCausalAttentionMaskModel(nn.Module):
        def forward(self, inputs_embeds):
            batch_size, seq_length, _ = inputs_embeds.shape
            past_key_values_length = 4
            attention_mask = _create_4d_causal_attention_mask(
                (batch_size, seq_length),
                dtype=inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )
            return attention_mask

    class Prepare4dAttentionMaskModel(nn.Module):
        def forward(self, mask, inputs_embeds):
            attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype)
            return attention_mask


if is_flax_available():
    from transformers import FlaxBertModel

if is_tf_available():
    from transformers import TFBertModel


TINY_T5 = "patrickvonplaten/t5-tiny-random"
TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification"


def check_models_equal(model1, model2):
    models_are_equal = True
    for model1_p, model2_p in zip(model1.parameters(), model2.parameters()):
        if model1_p.data.ne(model2_p.data).sum() > 0:
            models_are_equal = False

    return models_are_equal


@require_torch
class ModelUtilsTest(TestCasePlus):
    @slow
    def test_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = BertConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, PretrainedConfig)

            model = BertModel.from_pretrained(model_name)
            model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, PreTrainedModel)

            self.assertEqual(len(loading_info["missing_keys"]), 0)
            self.assertEqual(len(loading_info["unexpected_keys"]), 8)
            self.assertEqual(len(loading_info["mismatched_keys"]), 0)
            self.assertEqual(len(loading_info["error_msgs"]), 0)

            config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)

            # Not sure this is the intended behavior. TODO fix Lysandre & Thom
            config.name_or_path = model_name

            model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(model.config, config)

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        model = BertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = BertModel.from_pretrained(tmp_dir)

            model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        model = BertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = BertModel.from_pretrained(tmp_dir)

            model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = BertModel.from_pretrained(model_id)

        model = BertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = BertModel.from_pretrained(model_id)

        model = BertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    @is_flaky(
        description="Capturing logs is flaky: https://app.circleci.com/pipelines/github/huggingface/transformers/81004/workflows/4919e5c9-0ea2-457b-ad4f-65371f79e277/jobs/1038999"
    )
    def test_model_from_pretrained_with_different_pretrained_model_name(self):
        model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
        self.assertIsNotNone(model)

        logger = logging.get_logger("transformers.configuration_utils")
        with CaptureLogger(logger) as cl:
            BertModel.from_pretrained(TINY_T5)
        self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out)

    def test_model_from_config_torch_dtype(self):
        # test that the model can be instantiated with dtype of user's choice - as long as it's a
        # float dtype. To make it happen config.torch_dtype needs to be set before instantiating the
        # model from the config object.
        config = T5Config.from_pretrained(TINY_T5)
        model = AutoModel.from_config(config)
        # XXX: isn't supported
        # model = T5ForConditionalGeneration.from_config(config)
        self.assertEqual(model.dtype, torch.float32)

        model = AutoModel.from_config(config, torch_dtype=torch.float16)
        self.assertEqual(model.dtype, torch.float16)

        # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type
        with self.assertRaises(ValueError):
            model = AutoModel.from_config(config, torch_dtype=torch.int64)

    def test_model_from_pretrained_torch_dtype(self):
        # test that the model can be instantiated with dtype of either
        # 1. explicit from_pretrained's torch_dtype argument
        # 2. via autodiscovery by looking at model weights (torch_dtype="auto")
        # so if a model.half() was saved, we want it to be instantiated as such.
# # test an explicit model class, but also AutoModel separately as the latter goes through a different code path model_path = self.get_auto_remove_tmp_dir() # baseline - we know TINY_T5 is fp32 model model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertEqual(model.dtype, torch.float32) def remove_torch_dtype(model_path): file = f"{model_path}/config.json" with open(file, "r", encoding="utf-8") as f: s = json.load(f) s.pop("torch_dtype") with open(file, "w", encoding="utf-8") as f: json.dump(s, f) # test the default fp32 save_pretrained => from_pretrained cycle model.save_pretrained(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path) self.assertEqual(model.dtype, torch.float32) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # 2. test torch_dtype="auto" via auto-derivation # now remove the torch_dtype entry from config.json and try "auto" again which should # perform auto-derivation from weights remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # test forced loading in fp16 (even though the weights are in fp32) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with auto-detection model = model.half() model.save_pretrained(model_path) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.config.torch_dtype, torch.float16) self.assertEqual(model.dtype, torch.float16) # tests `config.torch_dtype` saving with open(f"{model_path}/config.json") as f: config_dict = json.load(f) self.assertEqual(config_dict["torch_dtype"], "float16") # 2. test torch_dtype="auto" via auto-derivation # now same with using config info remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # 3. 
now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration model = AutoModel.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with the explicit fp16 model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test AutoModel separately as it goes through a different path # test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") # test that the config object didn't get polluted with torch_dtype="auto" # there was a bug that after this call we ended up with config.torch_dtype=="auto" self.assertNotEqual(model.config.torch_dtype, "auto") # now test the outcome self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test model whose first param is not of a floating type, but int model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) def test_no_super_init_config_and_model(self): config = NoSuperInitConfig(attribute=32) model = NoSuperInitModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = NoSuperInitModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = torch.nn.Sequential( torch.nn.Linear(100, 200, bias=False), # size 80,000 torch.nn.Linear(200, 200, bias=False), # size 160,000 torch.nn.Linear(200, 100, bias=False), # size 80,000 torch.nn.Linear(100, 50, bias=False), # size 20,000 ) state_dict = model.state_dict() with self.subTest("No shard when max size is bigger than model size"): shards, index = shard_checkpoint(state_dict) self.assertIsNone(index) self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00002.bin", "1.weight": "pytorch_model-00001-of-00002.bin", "2.weight": "pytorch_model-00002-of-00002.bin", "3.weight": "pytorch_model-00002-of-00002.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]} shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2} ) with self.subTest("Test sharding with weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00003.bin", "1.weight": "pytorch_model-00002-of-00003.bin", "2.weight": "pytorch_model-00003-of-00003.bin", "3.weight": "pytorch_model-00003-of-00003.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"]} shard2 = {"1.weight": state_dict["1.weight"]} shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, { "pytorch_model-00001-of-00003.bin": shard1, "pytorch_model-00002-of-00003.bin": shard2, "pytorch_model-00003-of-00003.bin": shard3, }, ) def test_checkpoint_sharding_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".bin"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: state_dict = torch.load(shard_file) self.assertEqual(len(state_dict), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_sharding_from_hub(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. 
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.parameters(), ref_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_sharded_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_sharded_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = 
BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_hub_sharded(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_sharded_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_save_load_bin(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) model.save_pretrained(tmp_dir, safe_serialization=False) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) # there should be a normal checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertIsNotNone(model) @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_functional(self): # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and # sharded models mnames = [ "hf-internal-testing/tiny-random-bert-sharded", "hf-internal-testing/tiny-random-bert", ] for mname in mnames: _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) @require_usr_bin_time @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_measured(self): # test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default mname = "bert-base-cased" preamble = "from transformers import AutoModel" one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' max_rss_normal = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_normal=}") one_liner_str = f'{preamble}; 
AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_low_mem=}") diff_bytes = max_rss_normal - max_rss_low_mem diff_percent = diff_bytes / max_rss_low_mem # print(f"{diff_bytes=}, {diff_percent=}") # ideally we would compare that the diff is close to ~1x checkpoint size in bytes, but # measuring cpu memory on linux is very tricky and inconsistent, so instead let's check that # it's at least 15% less cpu memory consumed self.assertGreater( diff_percent, 0.15, "should use less CPU memory for low_cpu_mem_usage=True, " f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}", ) # if you want to compare things manually, let's first look at the size of the model in bytes # model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False) # total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) # total_bytes = total_numel * 4 # 420MB # Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent. # The easiest way to test this is to switch the model and torch.load to do all the work on # gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add # functionality to load models directly on gpu, this test can be rewritten to use torch's # cuda memory tracking and then we should be able to do a much more precise test. @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 model = AutoModelForCausalLM.from_pretrained("gpt2", device_map=device_map) tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello, my name is", return_tensors="pt") output = model.generate(inputs["input_ids"].to(0)) text_output = tokenizer.decode(output[0].tolist()) self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. 
I'm") @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": 0, "lm_head": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) model.save_pretrained(tmp_dir) new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0) outputs1 = new_model.to(0)(inputs) offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) # With state dict temp offload offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_derived_to_base_model(self): derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "wte": 0, "wpe": 0, "h.0": "cpu", "h.1": "cpu", "h.2": "cpu", "h.3": "disk", "h.4": "disk", "ln_f": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) derived_model.save_pretrained(tmp_dir, use_safetensors=True) base_model = AutoModel.from_pretrained(tmp_dir) outputs1 = base_model.to(0)(inputs) # with disk offload offload_folder = os.path.join(tmp_dir, "offload") base_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = base_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) # With state dict temp offload new_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) @slow @require_torch def test_from_pretrained_non_contiguous_checkpoint(self): # See: https://github.com/huggingface/transformers/pull/28414 # Tiny models on the Hub have contiguous weights, contrarily to google/owlvit model = OwlViTForObjectDetection.from_pretrained("fxmarty/owlvit-tiny-non-contiguous-weight") self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) model = OwlViTForObjectDetection.from_pretrained( "fxmarty/owlvit-tiny-non-contiguous-weight", device_map="auto" ) self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) model.save_pretrained(tmp_dir, safe_serialization=True) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the 
cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f ) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config ) @require_safetensors def test_use_safetensors(self): # Should not raise anymore AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): 
self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) @is_flaky( description="Capturing logs is flaky: https://app.circleci.com/pipelines/github/huggingface/transformers/81004/workflows/4919e5c9-0ea2-457b-ad4f-65371f79e277/jobs/1038999" ) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly 
recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) if not is_torchdynamo_available(): return with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."): logger.warning_once.cache_clear() from torch._dynamo import config, testing config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) def f(input_ids): model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) compile_counter = testing.CompileCounter() opt_fn = torch.compile(f, dynamic=True, backend=compile_counter) opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . 
model_ids = ["gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, torch_dtype=torch.float16, low_cpu_mem_usage=True, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) def test_generation_config_is_loaded_with_model(self): # Note: `joaogante/tiny-random-gpt2-with-generation-config` has a `generation_config.json` containing a dummy # `transformers_version` field set to `foo`. If loading the file fails, this test also fails. # 1. Load without further parameters model = AutoModelForCausalLM.from_pretrained("joaogante/tiny-random-gpt2-with-generation-config") self.assertEqual(model.generation_config.transformers_version, "foo") # 2. Load with `device_map` model = AutoModelForCausalLM.from_pretrained( "joaogante/tiny-random-gpt2-with-generation-config", device_map="auto" ) self.assertEqual(model.generation_config.transformers_version, "foo") @require_safetensors def test_safetensors_torch_from_torch(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors @require_flax def test_safetensors_torch_from_flax(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_tf @require_safetensors def test_safetensors_torch_from_tf(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors def test_safetensors_torch_from_torch_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_modifying_model_config_causes_warning_saving_generation_config(self): model = AutoModelForCausalLM.from_pretrained("gpt2") model.config.top_k = 1 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs("transformers.modeling_utils", level="WARNING") as logs: model.save_pretrained(tmp_dir) self.assertEqual(len(logs.output), 1) self.assertIn("Your generation config was originally created from the model config", logs.output[0]) @slow @require_torch class 
ModelOnTheFlyConversionTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.user = "huggingface-hub-ci" cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None) if cls.token is None: raise ValueError("Cannot run tests as secret isn't setup.") cls.api = HfApi(token=cls.token) def setUp(self) -> None: self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}" def tearDown(self) -> None: self.api.delete_repo(self.repo_name) def test_safetensors_on_the_fly_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name, token=self.token) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb") converted_model = 
BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub( self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") @unittest.skip("Edge case, should work once the Space is updated`") def test_safetensors_on_the_fly_wrong_user_opened_pr(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) # This should have opened a PR with the user's account with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") # We now switch the repo visibility to public self.api.update_repo_visibility(self.repo_name, private=False) # We once again call from_pretrained, which should call the bot to open a PR BertModel.from_pretrained(self.repo_name, use_safetensors=True, 
token=self.token) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertBot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_specific_revision(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Push a model on a given revision initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch") # Try to convert the model on that revision should raise with self.assertRaises(EnvironmentError): BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch") @require_torch @is_staging_test class ModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model-with-tags") except HTTPError: pass @unittest.skip("This test is flaky") def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("test-model", token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="test-model") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_with_description(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) COMMIT_DESCRIPTION = """ The commit description supports markdown synthax see: ```python >>> form transformers import AutoConfig >>> config = AutoConfig.from_pretrained("bert-base-uncased") ``` """ commit_details = model.push_to_hub( "test-model", use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION ) self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION) @unittest.skip("This test is flaky") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("valid_org/test-model-org", token=self._token) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo 
delete_repo(token=self._token, repo_id="valid_org/test-model-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-org") new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_dynamic_model(self): CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) model.push_to_hub("test-dynamic-model", token=self._token) # checks self.assertDictEqual( config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"}, ) new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module self.assertEqual(new_model.__class__.__name__, "CustomModel") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) new_model = AutoModel.from_config(config, trust_remote_code=True) self.assertEqual(new_model.__class__.__name__, "CustomModel") def test_push_to_hub_with_tags(self): from huggingface_hub import ModelCard new_tags = ["tag-1", "tag-2"] CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) self.assertTrue(model.model_tags is None) model.add_model_tags(new_tags) self.assertTrue(model.model_tags == new_tags) model.push_to_hub("test-dynamic-model-with-tags", token=self._token) loaded_model_card = ModelCard.load(f"{USER}/test-dynamic-model-with-tags") self.assertEqual(loaded_model_card.data.tags, new_tags) @require_torch class AttentionMaskTester(unittest.TestCase): def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) mask_4d_values = mask_4d[:, 0][mask_indices] is_inf = mask_4d_values == -float("inf") is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3): mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) if additional_mask is not None: for bsz_idx, seq_idx in additional_mask: mask_2d[bsz_idx, seq_idx] = 0 mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32) assert mask_4d.shape == (bsz, 1, q_len, kv_len) # make sure there are no overflows assert mask_4d.min() != float("-inf") context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif not mask_converter.is_causal and context is None: if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == 0 if 0 in mask_2d: self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif mask_converter.is_causal and context is not None: # k * (k+1) 
/ 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3): mask_4d = mask_converter.to_causal_4d( bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32 ) if q_len == 1 and mask_converter.sliding_window is None: # no causal mask if q_len is 1 assert mask_4d is None return context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked elif not mask_converter.is_causal and context is None: assert (mask_4d != 0).sum().cpu().item() == 0 elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked def compute_num_context_mask(self, kv_len, context, q_len): # This function computes the # of attention tokens that are added for # the sliding window c_mask_len = kv_len - context num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2 cut_mask_len = max(c_mask_len - q_len, 0) num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2 return num_mask_triangle - num_cut_mask def test_2d_to_4d_causal(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) # check that the mask does not overflow on causal masked tokens self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)]) def test_2d_to_4d(self): mask_converter = AttentionMaskConverter(is_causal=False) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, 
additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_causal_mask(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_causal_mask_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_torch_compile_fullgraph(self): model = Prepare4dCausalAttentionMaskModel() inputs_embeds = torch.rand([1, 3, 32]) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Create4dCausalAttentionMaskModel() inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Prepare4dAttentionMaskModel() mask = torch.ones(2, 4) mask[0, :2] = 0 inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(mask, inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(mask, inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) @require_torch @slow def test_unmask_unattended_left_padding(self): attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64) expanded_mask = torch.Tensor( [ [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) reference_output = torch.Tensor( [ [[[1, 1, 1], [1, 1, 1], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1) self.assertTrue(torch.equal(result, reference_output)) attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) min_inf = torch.finfo(torch.float32).min reference_output = torch.Tensor( [ [ [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [min_inf, min_inf, 0, min_inf, min_inf], [min_inf, min_inf, 0, 0, min_inf], [min_inf, min_inf, 0, 0, 0], ] ], [ [ [0, min_inf, min_inf, min_inf, min_inf], [0, 0, min_inf, min_inf, min_inf], [0, 0, 0, min_inf, min_inf], [0, 0, 0, 0, min_inf], [0, 0, 0, 0, 0], ] ], [ [ [0, 0, 0, 0, 0], [min_inf, 0, min_inf, min_inf, min_inf], [min_inf, 0, 0, min_inf, min_inf], [min_inf, 0, 0, 0, min_inf], [min_inf, 0, 0, 0, 0], ] ], ] ) self.assertTrue(torch.equal(reference_output, result)) @require_torch @slow def test_unmask_unattended_right_padding(self): attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64) 
attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch @slow def test_unmask_unattended_random_mask(self): attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch class TestAttentionImplementation(unittest.TestCase): def test_error_no_sdpa_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa") self.assertTrue( "does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention" in str(cm.exception) ) _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") def test_error_no_flash_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_no_flash_available_with_config(self): with self.assertRaises(ValueError) as cm: config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_wrong_attn_implementation(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo") self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception)) def test_not_available_flash(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2" ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_flash_with_config(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", config=config, attn_implementation="flash_attention_2", ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_sdpa(self): if is_torch_sdpa_available(): self.skipTest("This test requires torch<=2.0") 
with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="sdpa" ) self.assertTrue("PyTorch SDPA requirements in Transformers are not met" in str(cm.exception)) @slow @require_torch_gpu class Mask4DTestBase(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_test_data(self): texts = ["the cat sat", "the cat had", "the cat is"] encoded = [self.tokenizer.encode(t) for t in texts] input_0 = torch.tensor(encoded, device=torch_device) # tensor([[ 1, 278, 6635, 3290], # [ 1, 278, 6635, 750], # [ 1, 278, 6635, 338]], device='cuda:0') # Combining common prefix with the unique ending tokens: input_1 = torch.cat([input_0[0][:-1], input_0[:, -1]]).unsqueeze(0) # tensor([[ 1, 278, 6635, 3290, 750, 338]], device='cuda:0') # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_1 = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], device="cuda:0", dtype=torch.int64, ) # Creating a position_ids tensor. note the repeating figures in the end. position_ids_1 = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_0, input_1, mask_1, position_ids_1 @slow @require_torch_gpu class Mask4DTestFP32(Mask4DTestBase): def setUp(self): model_name = "JackFram/llama-68m" # small Llama-like model from FlexFlow model_dtype = torch.float32 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=model_dtype).to(torch_device) def test_attention(self): """comparing outputs of attention layer""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() hid_0 = self.model.model.embed_tokens(input_0) outs_0 = self.model.model.layers[0].self_attn.forward(hid_0)[0] # outs_0.shape == torch.Size([3, 4, 768]) hid_1 = self.model.model.embed_tokens(input_1) outs_1 = self.model.model.layers[0].self_attn.forward( hid_1, attention_mask=mask_1.bool(), position_ids=position_ids_1 )[0] # outs_1.shape == torch.Size([1, 6, 768]) outs_0_last_tokens = outs_0[:, -1, :] # last tokens in each batch line outs_1_last_tokens = outs_1[0, -3:, :] # last three tokens assert torch.allclose(outs_0_last_tokens, outs_1_last_tokens) def test_inner_model(self): """comparing hidden outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens torch.testing.assert_close( logits_0_last_tokens, logits_1_last_tokens, ) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens torch.testing.assert_close( logits_0_last_tokens, logits_1_last_tokens, ) @slow @require_torch_gpu class Mask4DTestFP16(Mask4DTestBase): test_attention = Mask4DTestFP32.test_attention def setUp(self): model_name = "JackFram/llama-68m" # 
small Llama-like model from FlexFlow model_dtype = torch.float16 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=model_dtype).to(torch_device) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens indices_0 = logits_0_last_tokens.sort(descending=True).indices indices_1 = logits_1_last_tokens.sort(descending=True).indices # checking logits, but note relaxed tolerances for FP16 torch.testing.assert_close(logits_0_last_tokens, logits_1_last_tokens, atol=0.02, rtol=0.001) # checking tokens order for the top tokens for token_ids_0, token_ids_1 in zip(indices_0, indices_1): self.assertTrue(torch.equal(token_ids_0[:128], token_ids_1[:128]))
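# --- Editorial addition, not part of the original test file: a minimal, self-contained sketch of the
# packed-sequence 4D attention mask pattern exercised by the Mask4DTest* classes above. Several
# sequences sharing a common prefix are packed into one row; the mask starts from a causal mask and
# then removes attention between the packed suffix tokens, while repeated position_ids place all
# suffix tokens at the same position. The helper name and default shapes are illustrative assumptions.
import torch


def _build_packed_4d_mask_sketch(seq_len=6, n_suffix=3):
    # causal (lower-triangular) base mask of shape (seq_len, seq_len)
    mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.int64))
    # cut attention between the packed suffix tokens, keeping each token's attention to itself
    suffix = range(seq_len - n_suffix, seq_len)
    for i in suffix:
        for j in suffix:
            if i != j:
                mask[i, j] = 0
    # shared prefix gets increasing positions; every suffix token reuses the next position
    position_ids = torch.cat(
        [torch.arange(seq_len - n_suffix), torch.full((n_suffix,), seq_len - n_suffix, dtype=torch.int64)]
    )
    # reshape to (batch, 1, q_len, kv_len) and (batch, seq_len) as passed to the model forward above
    return mask[None, None], position_ids[None]


# With the defaults, the returned tensors match the values of the hand-written `mask_1` and
# `position_ids_1` built in `Mask4DTestBase.get_test_data` above (device placement aside).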
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_tokenization_utils.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPT2TokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class TokenizerUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def test_cached_files_are_used_when_internet_is_down_missing_files(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = GPT2TokenizerFast.from_pretrained("gpt2") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = GPT2TokenizerFast.from_pretrained("gpt2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_one_file(self): # This test is for deprecated behavior and can be removed in v5 try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f) _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json"): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open("tokenizer.json", "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json") def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model") @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-tokenizer") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("test-tokenizer", token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) # Reset repo delete_repo(token=self._token, repo_id="test-tokenizer") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub("valid_org/test-tokenizer-org", token=self._token) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, token=self._token ) new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org") self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @require_tokenizers def test_push_to_hub_dynamic_tokenizer(self): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = 
AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token) tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast") tokenizer = AutoTokenizer.from_pretrained( f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") class TrieTest(unittest.TestCase): def test_trie(self): trie = Trie() trie.add("Hello 友達") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}) trie.add("Hello") trie.data self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}) def test_trie_split(self): trie = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"]) trie.add("[CLS]") trie.add("extra_id_1") trie.add("extra_id_100") self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"]) def test_trie_single(self): trie = Trie() trie.add("A") self.assertEqual(trie.split("ABC"), ["A", "BC"]) self.assertEqual(trie.split("BCA"), ["BC", "A"]) def test_trie_final(self): trie = Trie() trie.add("TOKEN]") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_subtokens(self): trie = Trie() trie.add("A") trie.add("P") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_suffix_tokens(self): trie = Trie() trie.add("AB") trie.add("B") trie.add("C") self.assertEqual(trie.split("ABC"), ["AB", "C"]) def test_trie_skip(self): trie = Trie() trie.add("ABC") trie.add("B") trie.add("CD") self.assertEqual(trie.split("ABCD"), ["ABC", "D"]) def test_cut_text_hardening(self): # Even if the offsets are wrong, we necessarily output correct string # parts. trie = Trie() parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3]) self.assertEqual(parts, ["AB", "C"])
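# --- Editorial addition, not part of the original test file: a small usage sketch for the `Trie`
# exercised by TrieTest above, showing how a tokenizer typically uses it -- register added/special
# tokens, then `split` the input so those tokens come out as whole fragments and only the remaining
# text is handed to the regular tokenization algorithm. Helper name and tokens are illustrative.
from transformers.tokenization_utils import Trie


def _split_out_special_tokens_sketch(text, special_tokens):
    trie = Trie()
    for token in special_tokens:
        trie.add(token)
    # fragments equal to a registered token are isolated; everything else is returned untouched
    return trie.split(text)


# _split_out_special_tokens_sketch("[CLS] hello world [SEP]", ["[CLS]", "[SEP]"])
# -> ["[CLS]", " hello world ", "[SEP]"]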
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_tokenization_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import itertools import json import os import pickle import re import shutil import tempfile import traceback import unittest from collections import OrderedDict from itertools import takewhile from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union from parameterized import parameterized from transformers import ( AlbertTokenizer, AlbertTokenizerFast, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast, SpecialTokensMixin, Trainer, TrainingArguments, is_flax_available, is_tf_available, is_torch_available, logging, ) from transformers.testing_utils import ( check_json_file_has_correct_format, get_tests_dir, is_pt_tf_cross_test, require_jinja, require_tf, require_tokenizers, require_torch, run_test_in_subprocess, slow, ) from transformers.tokenization_utils import AddedToken if is_torch_available(): import torch.nn as nn if TYPE_CHECKING: from transformers import PretrainedConfig, PreTrainedModel, TFPreTrainedModel logger = logging.get_logger(__name__) NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"] SMALL_TRAINING_CORPUS = [ ["This is the first sentence.", "This is the second one."], ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."], ] def filter_non_english(_, pretrained_name: str): """Filter all the model for non-english language""" return not any(lang in pretrained_name for lang in NON_ENGLISH_TAGS) def filter_roberta_detectors(_, pretrained_name: str): return "detector" not in pretrained_name def merge_model_tokenizer_mappings( model_mapping: Dict["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]], tokenizer_mapping: Dict["PretrainedConfig", Tuple["PreTrainedTokenizer", "PreTrainedTokenizerFast"]], ) -> Dict[ Union["PreTrainedTokenizer", "PreTrainedTokenizerFast"], Tuple["PretrainedConfig", Union["PreTrainedModel", "TFPreTrainedModel"]], ]: configurations = list(model_mapping.keys()) model_tokenizer_mapping = OrderedDict([]) for configuration in configurations: if configuration in model_mapping and configuration in tokenizer_mapping: model = model_mapping[configuration] tokenizer = tokenizer_mapping[configuration][0] tokenizer_fast = tokenizer_mapping[configuration][1] if tokenizer is not None: if configuration.__name__.startswith(tokenizer.__name__.replace("Tokenizer", "")): model_tokenizer_mapping.update({tokenizer: (configuration, model)}) if tokenizer_fast is not None: if configuration.__name__.startswith(tokenizer_fast.__name__.replace("TokenizerFast", "")): model_tokenizer_mapping.update({tokenizer_fast: (configuration, model)}) return model_tokenizer_mapping def _test_subword_regularization_tokenizer(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) tokenizer = inputs["tokenizer"] sp_model_kwargs = inputs["sp_model_kwargs"] test_sentencepiece_ignore_case = inputs["test_sentencepiece_ignore_case"] 
unittest.TestCase().assertTrue(hasattr(tokenizer, "sp_model_kwargs")) unittest.TestCase().assertIsNotNone(tokenizer.sp_model_kwargs) unittest.TestCase().assertTrue(isinstance(tokenizer.sp_model_kwargs, dict)) unittest.TestCase().assertDictEqual(tokenizer.sp_model_kwargs, sp_model_kwargs) check_subword_sampling(tokenizer, test_sentencepiece_ignore_case=test_sentencepiece_ignore_case) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def check_subword_sampling( tokenizer: PreTrainedTokenizer, text: str = None, test_sentencepiece_ignore_case: bool = True, ) -> None: """ Check if the tokenizer generates different results when subword regularization is enabled. Subword regularization augments training data with subword sampling. This has a random component. Args: tokenizer: The tokenizer to check. text: The text to use for the checks. test_sentencepiece_ignore_case: See `TokenizerTesterMixin.test_sentencepiece_ignore_case`. """ text = "This is a test for subword regularization." if text is None else text if test_sentencepiece_ignore_case: text = text.lower() tokens_list = [] for _ in range(5): tokens_list.append(tokenizer.tokenize(text)) # the list of different pairs of tokens_list combinations = itertools.combinations(tokens_list, 2) # check of sampling is done subword_sampling_found = False for combination in combinations: if combination[0] != combination[1]: subword_sampling_found = True unittest.TestCase().assertTrue(subword_sampling_found) # check if converting back to original text works for tokens in tokens_list: if test_sentencepiece_ignore_case: unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens).lower()) else: unittest.TestCase().assertEqual(text, tokenizer.convert_tokens_to_string(tokens)) class TokenizerTesterMixin: tokenizer_class = None rust_tokenizer_class = None test_slow_tokenizer = True test_rust_tokenizer = True space_between_special_tokens = False from_pretrained_kwargs = None from_pretrained_filter = None from_pretrained_vocab_key = "vocab_file" test_seq2seq = True # set to True to test a sentencepiece tokenizer test_sentencepiece = False # set to True to ignore casing when testing a sentencepiece tokenizer # test_sentencepiece must also be set to True test_sentencepiece_ignore_case = False def setUp(self) -> None: # Tokenizer.filter makes it possible to filter which Tokenizer to case based on all the # information available in Tokenizer (name, rust class, python class, vocab key name) if self.test_rust_tokenizer: tokenizers_list = [ ( self.rust_tokenizer_class, pretrained_name, self.from_pretrained_kwargs if self.from_pretrained_kwargs is not None else {}, ) for pretrained_name in self.rust_tokenizer_class.pretrained_vocab_files_map[ self.from_pretrained_vocab_key ].keys() if self.from_pretrained_filter is None or (self.from_pretrained_filter is not None and self.from_pretrained_filter(pretrained_name)) ] self.tokenizers_list = tokenizers_list[:1] # Let's just test the first pretrained vocab for speed else: self.tokenizers_list = [] with open(f"{get_tests_dir()}/fixtures/sample_text.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() self.tmpdirname = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_txt = self.get_clean_sequence(tokenizer)[0] return input_txt, input_txt def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, 
min_length=5) -> Tuple[str, list]:
        # the length of the tokenizer does not always represent the tokens that it can encode: what if there are holes?
        toks = [
            (i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in set(tokenizer.get_vocab().values())
        ]
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizers(self, fast=True, **kwargs) -> List[PreTrainedTokenizerBase]:
        if fast and self.test_rust_tokenizer and self.test_slow_tokenizer:
            return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
        elif fast and self.test_rust_tokenizer:
            return [self.get_rust_tokenizer(**kwargs)]
        elif self.test_slow_tokenizer:
            return [self.get_tokenizer(**kwargs)]
        else:
            raise ValueError("This tokenizer class has no tokenizer to be tested.")

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def tokenizer_integration_test_util(
        self,
        expected_encoding: Dict,
        model_name: str,
        revision: str = None,
        sequences: List[str] = None,
        decode_kwargs: Dict[str, Any] = None,
        padding: bool = True,
    ):
        """
        Util for integration test.

        Text is tokenized and then reverted back to text. Both results are then checked.

        Args:
            expected_encoding: The expected result of the tokenizer output.
            model_name: The model name of the tokenizer to load and use.
            revision: The full git revision number of the model. This is to pin the tokenizer config and to avoid
                tests starting to fail if the config gets changed upstream.
            sequences: Can overwrite the texts that are used to check the tokenizer. This is useful if the tokenizer
                supports non-English languages like French.
            decode_kwargs: Additional args for the ``decode`` function which reverts the tokenized text back to a
                string.
            padding: Activates and controls padding of the tokenizer.
        """
        decode_kwargs = {} if decode_kwargs is None else decode_kwargs

        if sequences is None:
            sequences = [
                "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
                "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...)
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained " "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] if self.test_sentencepiece_ignore_case: sequences = [sequence.lower() for sequence in sequences] tokenizer_classes = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class) for tokenizer_class in tokenizer_classes: tokenizer = tokenizer_class.from_pretrained( model_name, revision=revision, # to pin the tokenizer version ) encoding = tokenizer(sequences, padding=padding) decoded_sequences = [ tokenizer.decode(seq, skip_special_tokens=True, **decode_kwargs) for seq in encoding["input_ids"] ] encoding_data = encoding.data self.assertDictEqual(encoding_data, expected_encoding) for expected, decoded in zip(sequences, decoded_sequences): if self.test_sentencepiece_ignore_case: expected = expected.lower() self.assertEqual(expected, decoded) def assert_padded_input_match(self, input_r: list, input_p: list, max_length: int, pad_token_id: int): # Ensure we match max_length self.assertEqual(len(input_r), max_length) self.assertEqual(len(input_p), max_length) # Ensure the number of padded tokens is the same padded_tokens_r = list(takewhile(lambda i: i == pad_token_id, reversed(input_r))) padded_tokens_p = list(takewhile(lambda i: i == pad_token_id, reversed(input_p))) self.assertSequenceEqual(padded_tokens_r, padded_tokens_p) def assert_batch_padded_input_match( self, input_r: dict, input_p: dict, max_length: int, pad_token_id: int, model_main_input_name: str = "input_ids", ): for i_r in input_r.values(): ( self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(len(i_r[1]), max_length), ) ( self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(len(i_r[1]), max_length), ) for i_r, i_p in zip(input_r[model_main_input_name], input_p[model_main_input_name]): self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id) for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]): self.assertSequenceEqual(i_r, i_p) @staticmethod def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences): # Switch from batch_encode_plus format: {'input_ids': [[...], [...]], ...} # to the list of examples/ encode_plus format: [{'input_ids': [...], ...}, {'input_ids': [...], ...}] return [ {value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences.keys()} for i in range(len(batch_encode_plus_sequences["input_ids"])) ] # TODO: this test can be combined with `test_sentencepiece_tokenize_and_convert_tokens_to_string` after the latter is extended to all tokenizers. 
def test_tokenize_special_tokens(self): """Test `tokenize` with special tokens.""" tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): SPECIAL_TOKEN_1 = "[SPECIAL_TOKEN_1]" SPECIAL_TOKEN_2 = "[SPECIAL_TOKEN_2]" # Both methods should add the token to `_additional_special_tokens` and `added_tokens_decoder` tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True) tokenizer.add_special_tokens( {"additional_special_tokens": [SPECIAL_TOKEN_2]}, replace_additional_special_tokens=False ) token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1) token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2) self.assertEqual(len(token_1), 1) self.assertEqual(len(token_2), 1) self.assertEqual(token_1[0], SPECIAL_TOKEN_1) # next is failing for almost all the Fast tokenizers now. # self.assertEqual(token_2[0], SPECIAL_TOKEN_2) # TODO: this test could be extended to all tokenizers - not just the sentencepiece def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): """Test ``_tokenize`` and ``convert_tokens_to_string``.""" if not self.test_sentencepiece: return tokenizer = self.get_tokenizer() text = "This is text to test the tokenizer." if self.test_sentencepiece_ignore_case: text = text.lower() tokens = tokenizer.tokenize(text) self.assertTrue(len(tokens) > 0) # check if converting back to original text works reverse_text = tokenizer.convert_tokens_to_string(tokens) if self.test_sentencepiece_ignore_case: reverse_text = reverse_text.lower() self.assertEqual(reverse_text, text) special_tokens = tokenizer.all_special_tokens special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens) for special_token in special_tokens: self.assertIn(special_token, special_tokens_string) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens) self.assertEqual(special_tokens_string, special_tokens_string_rust) def test_sentencepiece_tokenize_and_decode(self): if not self.test_sentencepiece: return text = "This is text to test the tokenizer." if self.test_rust_tokenizer: tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() slow_ids = tokenizer(text).input_ids fast_ids = rust_tokenizer(text).input_ids self.assertEqual(slow_ids, fast_ids) slow_decoded = tokenizer.decode(slow_ids) fast_decoded = rust_tokenizer.decode(slow_ids) self.assertEqual(slow_decoded, fast_decoded) def test_subword_regularization_tokenizer(self) -> None: if not self.test_sentencepiece: return # Subword regularization is only available for the slow tokenizer. sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1} tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs) run_test_in_subprocess( test_case=self, target_func=_test_subword_regularization_tokenizer, inputs={ "tokenizer": tokenizer, "sp_model_kwargs": sp_model_kwargs, "test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case, }, ) def test_pickle_subword_regularization_tokenizer(self) -> None: if not self.test_sentencepiece: return """Google pickle __getstate__ __setstate__ if you are struggling with this.""" # Subword regularization is only available for the slow tokenizer. 
sp_model_kwargs = {"enable_sampling": True, "alpha": 0.1, "nbest_size": -1} tokenizer = self.get_tokenizer(sp_model_kwargs=sp_model_kwargs) tokenizer_bin = pickle.dumps(tokenizer) del tokenizer tokenizer_new = pickle.loads(tokenizer_bin) run_test_in_subprocess( test_case=self, target_func=_test_subword_regularization_tokenizer, inputs={ "tokenizer": tokenizer_new, "sp_model_kwargs": sp_model_kwargs, "test_sentencepiece_ignore_case": self.test_sentencepiece_ignore_case, }, ) def test_save_sentencepiece_tokenizer(self) -> None: if not self.test_sentencepiece or not self.test_slow_tokenizer: return # We want to verify that we will be able to save the tokenizer even if the original files that were used to # build the tokenizer have been deleted in the meantime. text = "This is text to test the tokenizer." tokenizer_slow_1 = self.get_tokenizer() encoding_tokenizer_slow_1 = tokenizer_slow_1(text) tmpdirname_1 = tempfile.mkdtemp() tmpdirname_2 = tempfile.mkdtemp() tokenizer_slow_1.save_pretrained(tmpdirname_1) tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1) encoding_tokenizer_slow_2 = tokenizer_slow_2(text) shutil.rmtree(tmpdirname_1) tokenizer_slow_2.save_pretrained(tmpdirname_2) tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2) encoding_tokenizer_slow_3 = tokenizer_slow_3(text) shutil.rmtree(tmpdirname_2) self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2) self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3) def test_model_input_names_signature(self): accepted_model_main_input_names = [ "input_ids", # nlp models "input_values", # speech models ] tokenizers = self.get_tokenizers() for tokenizer in tokenizers: # first name of model_input_names has to correspond to main model input name # to make sure `tokenizer.pad(...)` works correctly self.assertTrue(tokenizer.model_input_names[0] in accepted_model_main_input_names) def test_rust_tokenizer_signature(self): if not self.test_rust_tokenizer: return signature = inspect.signature(self.rust_tokenizer_class.__init__) self.assertIn("tokenizer_file", signature.parameters) self.assertIsNone(signature.parameters["tokenizer_file"].default) def test_tokenizer_slow_store_full_signature(self): if not self.test_slow_tokenizer: return signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_tokenizer_fast_store_full_signature(self): if not self.test_rust_tokenizer: return signature = inspect.signature(self.rust_tokenizer_class.__init__) tokenizer = self.get_rust_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty and parameter_name not in [ "vocab_file", "merges_file", "tokenizer_file", ]: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence, _ = self.get_input_output_texts(tokenizer) # We don't have an exact equivalence on `tokenize()` between Rust and Slow # Slow tokenizer only split tokens, Rust tokenizers will replace with <unk> # tokens = tokenizer.tokenize(sequence) # rust_tokens = 
rust_tokenizer.tokenize(sequence) # self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) ids = tokenizer.encode(sequence, add_special_tokens=True) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=True) self.assertListEqual(ids, rust_ids) def test_tokenizers_common_properties(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] for attr in attributes_list: self.assertTrue(hasattr(tokenizer, attr)) self.assertTrue(hasattr(tokenizer, attr + "_id")) self.assertTrue(hasattr(tokenizer, "additional_special_tokens")) self.assertTrue(hasattr(tokenizer, "additional_special_tokens_ids")) attributes_list = [ "model_max_length", "init_inputs", "init_kwargs", ] if not isinstance(tokenizer, PreTrainedTokenizerFast): attributes_list += [ "added_tokens_encoder", "added_tokens_decoder", ] for attr in attributes_list: self.assertTrue(hasattr(tokenizer, attr)) def test_tokenizers_common_ids_setters(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] vocab = tokenizer.get_vocab() token_id_to_test_setters = next(iter(vocab.values())) token_to_test_setters = tokenizer.convert_ids_to_tokens( token_id_to_test_setters, skip_special_tokens=False ) for attr in attributes_list: setattr(tokenizer, attr + "_id", None) self.assertEqual(getattr(tokenizer, attr), None) self.assertEqual(getattr(tokenizer, attr + "_id"), None) setattr(tokenizer, attr + "_id", token_id_to_test_setters) self.assertEqual(getattr(tokenizer, attr), token_to_test_setters) self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters) setattr(tokenizer, "additional_special_tokens_ids", []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), []) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), []) setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters]) self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters]) @parameterized.expand([(True,), (False,)]) def test_tokenizers_special_tokens_properties_unset(self, verbose): tokenizers = self.get_tokenizers(verbose=verbose) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): attributes_list = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] for attr in attributes_list: setattr(tokenizer, attr, None) self.assertIsNone(getattr(tokenizer, attr)) def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = 
tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # Test that we can also use the non-legacy saving format for fast tokenizers tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) def test_pickle_tokenizer(self): """Google pickle __getstate__ __setstate__ if you are struggling with 
this.""" tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertIsNotNone(tokenizer) text = "Munich and Berlin are nice cities" subwords = tokenizer.tokenize(text) filename = os.path.join(self.tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new.tokenize(text) self.assertListEqual(subwords, subwords_loaded) @require_tokenizers def test_pickle_added_tokens(self): tok1 = AddedToken("<s>", rstrip=True, lstrip=True, normalized=False, single_word=True) tok2 = pickle.loads(pickle.dumps(tok1)) self.assertEqual(tok1.__getstate__(), tok2.__getstate__()) def test_added_tokens_do_lower_case(self): tokenizers = self.get_tokenizers(do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if not hasattr(tokenizer, "do_lower_case") or not tokenizer.do_lower_case: continue special_token = tokenizer.all_special_tokens[0] text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"] added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks]) toks_after_adding = tokenizer.tokenize(text) toks_after_adding2 = tokenizer.tokenize(text2) # Rust tokenizers dont't lowercase added tokens at the time calling `tokenizer.add_tokens`, # while python tokenizers do, so new_toks 0 and 2 would be treated as the same, so do new_toks 1 and 3. self.assertIn(added, [2, 4]) self.assertListEqual(toks_after_adding, toks_after_adding2) self.assertTrue( len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer ) # Check that none of the special tokens are lowercased sequence_with_special_tokens = "A " + " yEs ".join(tokenizer.all_special_tokens) + " B" # Convert the tokenized list to str as some special tokens are tokenized like normal tokens # which have a prefix spacee e.g. the mask token of Albert, and cannot match the original # special tokens exactly. 
tokenized_sequence = "".join(tokenizer.tokenize(sequence_with_special_tokens)) for special_token in tokenizer.all_special_tokens: self.assertTrue(special_token in tokenized_sequence or special_token.lower() in tokenized_sequence) tokenizers = self.get_tokenizers(do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if hasattr(tokenizer, "do_lower_case") and tokenizer.do_lower_case: continue special_token = tokenizer.all_special_tokens[0] text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token toks_before_adding = tokenizer.tokenize(text) # toks before adding new_toks new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", "AAAAA BBBBBB", "CCCCCCCCCDDDDDDDD"] added = tokenizer.add_tokens([AddedToken(tok, lstrip=True, rstrip=True) for tok in new_toks]) self.assertIn(added, [2, 4]) toks_after_adding = tokenizer.tokenize(text) toks_after_adding2 = tokenizer.tokenize(text2) self.assertEqual(len(toks_after_adding), len(toks_after_adding2)) # Length should still be the same self.assertNotEqual( toks_after_adding[1], toks_after_adding2[1] ) # But at least the first non-special tokens should differ self.assertTrue( len(toks_before_adding) > len(toks_after_adding), # toks_before_adding should be longer ) # TODO @ArthurZ Nuke this def test_add_tokens_tokenizer(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests (but also otherwise) because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = [ AddedToken("aaaaa bbbbbb", rstrip=True, lstrip=True), AddedToken("cccccccccdddddddd", rstrip=True, lstrip=True), ] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) new_toks_2 = { "eos_token": AddedToken(">>>>|||<||<<|<<", rstrip=True, lstrip=True), "pad_token": AddedToken("<<<<<|||>|>>>>|>", rstrip=True, lstrip=True), } added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) tokens = tokenizer.encode( ">>>>|||<||<<|<< aaaaa bbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokens[-3]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-2], tokenizer.pad_token_id) def test_add_special_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for 
tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, ids = self.get_clean_sequence(tokenizer) special_token = AddedToken("[SPECIAL_TOKEN]", lstrip=True, rstrip=True) tokenizer.add_special_tokens({"cls_token": special_token}) special_token = str(special_token) encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(len(encoded_special_token), 1) text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False) encoded = tokenizer.encode(text, add_special_tokens=False) input_encoded = tokenizer.encode(input_text, add_special_tokens=False) special_token_id = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(encoded, input_encoded + special_token_id) decoded = tokenizer.decode(encoded, skip_special_tokens=True) self.assertTrue(special_token not in decoded) def test_internal_consistency(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, output_text = self.get_input_output_texts(tokenizer) tokens = tokenizer.tokenize(input_text) ids = tokenizer.convert_tokens_to_ids(tokens) ids_2 = tokenizer.encode(input_text, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) self.assertNotEqual(len(tokens_2), 0) text_2 = tokenizer.decode(ids) self.assertIsInstance(text_2, str) self.assertEqual(text_2, output_text) @require_tokenizers def test_encode_decode_with_spaces(self): tokenizers = self.get_tokenizers(do_lower_case=False, fast=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): new_toks = [ # These are added tokens, they will be normalized.... AddedToken("[ABC]", normalized=True, lstrip=True, rstrip=True), AddedToken("[DEF]", normalized=True, lstrip=True, rstrip=True), AddedToken("GHI IHG", normalized=True, lstrip=True, rstrip=True), ] tokenizer.add_tokens(new_toks) tokenizer.add_tokens([AddedToken("[SAMPLE]", normalized=True)], special_tokens=True) input = "[ABC][DEF][ABC]GHI IHG[DEF]" if self.space_between_special_tokens: output = "[ABC] [DEF] [ABC] GHI IHG [DEF]" else: output = input encoded = tokenizer.encode(input, add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(decoded, [output, output.lower()]) return # TODO @ArthurZ Refactor testing as now the do_normalize works for special and non special encoded = tokenizer.encode("[ABC] [DEF][SAMPLE]", add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True, skip_special_tokens=False) self.assertIn(decoded, ["[ABC] [DEF] [SAMPLE]", "[ABC] [DEF] [SAMPLE]".lower()]) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True, skip_special_tokens=True) self.assertIn(decoded, ["[ABC] [DEF]", "[ABC] [DEF]".lower()]) encoded = tokenizer.encode("[ABC][SAMPLE][DEF]", add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=True) self.assertIn(decoded, ["[ABC] [SAMPLE] [DEF]", "[ABC][SAMPLE][DEF]".lower()]) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=False) self.assertIn(decoded, ["[ABC][SAMPLE][DEF]", "[ABC][SAMPLE][DEF]".lower()]) def test_pretrained_model_lists(self): # We should have at least one default checkpoint for each tokenizer # We should specify the max input length as well (used in some part to list the pretrained checkpoints) 
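# (Background, stated loosely: pretrained_vocab_files_map maps each vocab file id to a
# {checkpoint name -> url} dict, while max_model_input_sizes maps checkpoint name -> maximum input
# length; the assertions below only check that the two stay in sync.)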
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) self.assertEqual( len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), len(self.tokenizer_class.max_model_input_sizes), ) weights_list = list(self.tokenizer_class.max_model_input_sizes.keys()) weights_lists_2 = [] for file_id, map_list in self.tokenizer_class.pretrained_vocab_files_map.items(): weights_lists_2.append(list(map_list.keys())) for weights_list_2 in weights_lists_2: self.assertListEqual(weights_list, weights_list_2) def test_mask_output(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if ( tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer" and "token_type_ids" in tokenizer.model_input_names ): seq_0 = "Test this method." seq_1 = "With these inputs." information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True) sequences, mask = information["input_ids"], information["token_type_ids"] self.assertEqual(len(sequences), len(mask)) def test_token_type_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." # We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0, return_token_type_ids=True) self.assertIn(0, output["token_type_ids"]) def test_sequence_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." seq_1 = "With these inputs." 
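# Rough expectation for a BERT-like fast tokenizer (illustrative only, not asserted as such here):
# sequence_ids() for a pair input looks like [None, 0, ..., 0, None, 1, ..., 1, None], with None
# marking special-token positions.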
# We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0) self.assertIn(0, output.sequence_ids()) output = tokenizer(seq_0, seq_1) self.assertIn(0, output.sequence_ids()) self.assertIn(1, output.sequence_ids()) if tokenizer.num_special_tokens_to_add(pair=True): self.assertIn(None, output.sequence_ids()) @require_jinja def test_chat_template(self): dummy_template = "{% for message in messages %}{{message['role'] + message['content']}}{% endfor %}" dummy_conversation = [ {"role": "system", "content": "system message"}, {"role": "user", "content": "user message"}, {"role": "assistant", "content": "assistant message"}, ] expected_output = "systemsystem messageuseruser messageassistantassistant message" tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): output = tokenizer.apply_chat_template( dummy_conversation, chat_template=dummy_template, tokenize=False ) self.assertEqual(output, expected_output) # Test we can pass chat_template arg # Check that no error raised when tokenize=True tokenizer.apply_chat_template(dummy_conversation, chat_template=dummy_template, tokenize=True) tokenizer.chat_template = dummy_template self.assertEqual(tokenizer.chat_template, dummy_template) # Test property setter output = tokenizer.apply_chat_template(dummy_conversation, tokenize=False) self.assertEqual(output, expected_output) # Test chat_template attribute is used if no arg is passed tokenizer.apply_chat_template(dummy_conversation, tokenize=True) # Check that no error raised with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(tmp_dir_name) tokenizer = tokenizer.from_pretrained(tmp_dir_name) self.assertEqual(tokenizer.chat_template, dummy_template) # Test template has persisted output = tokenizer.apply_chat_template(dummy_conversation, tokenize=False) self.assertEqual(output, expected_output) # Test output is the same after reloading tokenizer.apply_chat_template(dummy_conversation, tokenize=True) # Check that no error raised def test_number_of_added_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." seq_1 = "With these inputs." sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True) # Method is implemented (e.g. 
not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences) ) def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer.encode(seq_0, add_special_tokens=False) total_length = len(sequence) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Overflowing tokens stride = 2 information = tokenizer( seq_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, 
sequence[-(2 + stride) :]) def test_maximum_encoding_length_pair_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Build a sequence from our model's vocabulary stride = 2 seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) if len(ids) <= 2 + stride: seq_0 = (seq_0 + " ") * (2 + stride) ids = None seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False) self.assertGreater(len(seq0_tokens), 2 + stride) seq_1 = "This is another sentence to be encoded." seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False) if abs(len(seq0_tokens) - len(seq1_tokens)) <= 2: seq1_tokens = seq1_tokens + seq1_tokens seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False) seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False) self.assertGreater(len(seq1_tokens), 2 + stride) smallest = seq1_tokens if len(seq0_tokens) > len(seq1_tokens) else seq0_tokens # We are not using the special tokens - a bit too hard to test all the tokenizers with this # TODO try this again later sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) # , add_prefix_space=False) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_2 = seq_0 * model_max_length self.assertGreater(len(seq_2), model_max_length) sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False) total_length2 = len(sequence2["input_ids"]) self.assertLess( total_length1, model_max_length - 10, "Issue with the testing sequence, please update it." ) self.assertGreater( total_length2, model_max_length, "Issue with the testing sequence, please update it." 
) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"): output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer( [seq_2], [seq_1], padding=padding_state, truncation=truncation_state ) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple output = tokenizer(seq_1, seq_2, padding=padding_state, truncation="only_second") self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation="only_second") self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) truncated_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[:-2] + tokenizer.encode( seq_1, add_special_tokens=False ) truncated_second_sequence = ( tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[:-2] ) truncated_longest_sequence = ( truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence ) overflow_first_sequence = tokenizer.encode(seq_0, add_special_tokens=False)[ -(2 + stride) : ] + tokenizer.encode(seq_1, add_special_tokens=False) overflow_second_sequence = ( tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[-(2 + stride) :] ) overflow_longest_sequence = ( overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) else: # No overflowing tokens when using 'longest' in python tokenizers with 
self.assertRaises(ValueError) as context: information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_longest_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) self.assertEqual(overflowing_tokens, overflow_longest_sequence) else: # No overflowing tokens when using 'longest' in python tokenizers with self.assertRaises(ValueError) as context: information = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True, # add_prefix_space=False, ) self.assertTrue( context.exception.args[0].startswith( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." 
) ) information_first_truncated = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="only_first", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information_first_truncated["input_ids"][0] overflowing_tokens = information_first_truncated["input_ids"][1] self.assertEqual(len(information_first_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens)) self.assertEqual(overflowing_tokens, overflow_first_sequence) else: truncated_sequence = information_first_truncated["input_ids"] overflowing_tokens = information_first_truncated["overflowing_tokens"] self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_first_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq0_tokens[-(2 + stride) :]) information_second_truncated = tokenizer( seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=False, stride=stride, truncation="only_second", return_overflowing_tokens=True, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information_second_truncated["input_ids"][0] overflowing_tokens = information_second_truncated["input_ids"][1] self.assertEqual(len(information_second_truncated["input_ids"]), 2) self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens)) self.assertEqual(overflowing_tokens, overflow_second_sequence) else: truncated_sequence = information_second_truncated["input_ids"] overflowing_tokens = information_second_truncated["overflowing_tokens"] self.assertEqual(len(truncated_sequence), len(sequence) - 2) self.assertEqual(truncated_sequence, truncated_second_sequence) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, seq1_tokens[-(2 + stride) :]) # def test_encode_input_type(self): # tokenizers = self.get_tokenizers(do_lower_case=False) # for tokenizer in tokenizers: # with self.subTest(f"{tokenizer.__class__.__name__}"): # sequence = "Let's encode this sequence" # tokens = sequence.split() # tokenizer.tokenize(sequence) # # input_ids = tokenizer.convert_tokens_to_ids(tokens) # formatted_input = tokenizer.encode(sequence, add_special_tokens=True, add_prefix_space=False) # self.assertEqual( # tokenizer.encode(tokens, is_split_into_words=True, add_special_tokens=True), formatted_input # ) # # This is not supported with the Rust tokenizers # # self.assertEqual(tokenizer.encode(input_ids, add_special_tokens=True), formatted_input) # def test_swap_special_token(self): # tokenizers = self.get_tokenizers(do_lower_case=False) # for tokenizer in tokenizers: # with self.subTest(f"{tokenizer.__class__.__name__}"): # # Our mask token # mask = "<mask>" # # We take a single word in the middle of the vocabulary # all_tokens = sorted(tokenizer.get_vocab().keys()) # word = tokenizer.decode(tokenizer.encode(all_tokens[len(all_tokens)//2], add_special_tokens=False)[:1]) # sequence_0 = "Encode " + word + " sequence" # 
sequence_masked_0 = "Encode " + mask + " sequence" # sequence_1 = word + " this sequence" # sequence_masked_1 = mask + " this sequence" # # Add tokens so that masked token isn't split # # tokens = [AddedToken(t, lstrip=True, normalized=False) for t in sequence.split()] # # tokenizer.add_tokens(tokens) # tokenizer.add_special_tokens( # {"mask_token": AddedToken(mask, normalized=False)} # ) # Eat left space on Byte-level BPE tokenizers # mask_ind = tokenizer.convert_tokens_to_ids(mask) # # Test first masked sequence # encoded_0 = tokenizer.encode(sequence_0, add_special_tokens=False) # encoded_masked = tokenizer.encode(sequence_masked_0, add_special_tokens=False) # self.assertEqual(len(encoded_masked), len(encoded_0)) # mask_loc = encoded_masked.index(mask_ind) # encoded_masked[mask_loc] = encoded_0[mask_loc] # self.assertEqual(encoded_masked, encoded_0) # # Test second masked sequence # encoded_1 = tokenizer.encode(sequence_1, add_special_tokens=False) # encoded_masked = tokenizer.encode(sequence_masked_1, add_special_tokens=False) # self.assertEqual(len(encoded_masked), len(encoded_1)) # mask_loc = encoded_masked.index(mask_ind) # encoded_masked[mask_loc] = encoded_1[mask_loc] # self.assertEqual(encoded_masked, encoded_1) def test_special_tokens_mask(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." # Testing single inputs encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, add_special_tokens=True, return_special_tokens_mask=True, # , add_prefix_space=False ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] self.assertEqual(encoded_sequence, filtered_sequence) def test_special_tokens_mask_input_pairs(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence_0 = "Encode this." sequence_1 = "This one too please." 
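# Reminder of the convention relied on below (general, not fixture-specific): special_tokens_mask
# holds 1 at positions occupied by special tokens and 0 elsewhere, so keeping only the 0 positions
# should recover the encoding produced with add_special_tokens=False.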
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, # add_prefix_space=False, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) def test_padding_side_in_kwargs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): if self.test_rust_tokenizer: tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, padding_side="left", **kwargs ) self.assertEqual(tokenizer_r.padding_side, "left") tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, padding_side="right", **kwargs ) self.assertEqual(tokenizer_r.padding_side, "right") self.assertRaises( ValueError, self.rust_tokenizer_class.from_pretrained, pretrained_name, padding_side="unauthorized", **kwargs, ) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, padding_side="left", **kwargs) self.assertEqual(tokenizer_p.padding_side, "left") tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, padding_side="right", **kwargs) self.assertEqual(tokenizer_p.padding_side, "right") self.assertRaises( ValueError, self.tokenizer_class.from_pretrained, pretrained_name, padding_side="unauthorized", **kwargs, ) def test_truncation_side_in_kwargs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): if self.test_rust_tokenizer: tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, truncation_side="left", **kwargs ) self.assertEqual(tokenizer_r.truncation_side, "left") tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, truncation_side="right", **kwargs ) self.assertEqual(tokenizer_r.truncation_side, "right") self.assertRaises( ValueError, self.rust_tokenizer_class.from_pretrained, pretrained_name, truncation_side="unauthorized", **kwargs, ) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, truncation_side="left", **kwargs ) self.assertEqual(tokenizer_p.truncation_side, "left") tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, truncation_side="right", **kwargs ) self.assertEqual(tokenizer_p.truncation_side, "right") self.assertRaises( ValueError, self.tokenizer_class.from_pretrained, pretrained_name, truncation_side="unauthorized", **kwargs, ) def test_right_and_left_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "Sequence" padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" 
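# Illustrative expectation with hypothetical ids: if pad_token_id were 0 and the encoding were
# [5, 6], padding to length 4 should give [5, 6, 0, 0] with right padding and [0, 0, 5, 6] with
# left padding.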
encoded_sequence = tokenizer.encode(sequence) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( sequence, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) self.assertEqual(sequence_length + padding_size, padded_sequence_length) self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence) # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" encoded_sequence = tokenizer.encode(sequence) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( sequence, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) self.assertEqual(sequence_length + padding_size, padded_sequence_length) self.assertEqual([padding_idx] * padding_size + encoded_sequence, padded_sequence) # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' encoded_sequence = tokenizer.encode(sequence) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(sequence, padding=True) padded_sequence_right_length = len(padded_sequence_right) self.assertEqual(sequence_length, padded_sequence_right_length) self.assertEqual(encoded_sequence, padded_sequence_right) tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(sequence, padding="longest") padded_sequence_left_length = len(padded_sequence_left) self.assertEqual(sequence_length, padded_sequence_left_length) self.assertEqual(encoded_sequence, padded_sequence_left) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(sequence) padded_sequence_right_length = len(padded_sequence_right) self.assertEqual(sequence_length, padded_sequence_right_length) self.assertEqual(encoded_sequence, padded_sequence_right) tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(sequence, padding=False) padded_sequence_left_length = len(padded_sequence_left) self.assertEqual(sequence_length, padded_sequence_left_length) self.assertEqual(encoded_sequence, padded_sequence_left) def test_right_and_left_truncation(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "This is a test sequence" # RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True truncation_size = 3 tokenizer.truncation_side = "right" encoded_sequence = tokenizer.encode(sequence, add_special_tokens=False) sequence_length = len(encoded_sequence) # Remove EOS/BOS tokens truncated_sequence = tokenizer.encode( sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False ) truncated_sequence_length = len(truncated_sequence) self.assertEqual(sequence_length, truncated_sequence_length + truncation_size) self.assertEqual(encoded_sequence[:-truncation_size], truncated_sequence) # LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "left" sequence_length = len(encoded_sequence) truncated_sequence = tokenizer.encode( sequence, max_length=sequence_length - truncation_size, truncation=True, add_special_tokens=False ) truncated_sequence_length = len(truncated_sequence) self.assertEqual(sequence_length, truncated_sequence_length +
truncation_size) self.assertEqual(encoded_sequence[truncation_size:], truncated_sequence) # RIGHT & LEFT TRUNCATION - Check that nothing is done for 'longest' and 'no_truncation' sequence_length = len(encoded_sequence) tokenizer.truncation_side = "right" truncated_sequence_right = tokenizer.encode(sequence, truncation=True, add_special_tokens=False) truncated_sequence_right_length = len(truncated_sequence_right) self.assertEqual(sequence_length, truncated_sequence_right_length) self.assertEqual(encoded_sequence, truncated_sequence_right) tokenizer.truncation_side = "left" truncated_sequence_left = tokenizer.encode( sequence, truncation="longest_first", add_special_tokens=False ) truncated_sequence_left_length = len(truncated_sequence_left) self.assertEqual(sequence_length, truncated_sequence_left_length) self.assertEqual(encoded_sequence, truncated_sequence_left) tokenizer.truncation_side = "right" truncated_sequence_right = tokenizer.encode(sequence, add_special_tokens=False) truncated_sequence_right_length = len(truncated_sequence_right) self.assertEqual(sequence_length, truncated_sequence_right_length) self.assertEqual(encoded_sequence, truncated_sequence_right) tokenizer.truncation_side = "left" truncated_sequence_left = tokenizer.encode(sequence, truncation=False, add_special_tokens=False) truncated_sequence_left_length = len(truncated_sequence_left) self.assertEqual(sequence_length, truncated_sequence_left_length) self.assertEqual(encoded_sequence, truncated_sequence_left) def test_padding_to_max_length(self): """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` is deprecated.""" tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "Sequence" padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id # Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" encoded_sequence = tokenizer.encode(sequence) sequence_length = len(encoded_sequence) # FIXME: the next line should be padding(max_length) to avoid warning padded_sequence = tokenizer.encode( sequence, max_length=sequence_length + padding_size, pad_to_max_length=True ) padded_sequence_length = len(padded_sequence) self.assertEqual(sequence_length + padding_size, padded_sequence_length) self.assertEqual(encoded_sequence + [padding_idx] * padding_size, padded_sequence) # Check that nothing is done when a maximum length is not specified encoded_sequence = tokenizer.encode(sequence) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True) padded_sequence_right_length = len(padded_sequence_right) self.assertEqual(sequence_length, padded_sequence_right_length) self.assertEqual(encoded_sequence, padded_sequence_right) def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") else: empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8) for key, value in empty_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
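# Note (informal): pad_to_multiple_of=8 rounds the padded length up to the next multiple of 8,
# e.g. a 5-token encoding is padded to 8 and a 9-token one to 16; an empty encoding passes
# trivially since 0 % 8 == 0.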
for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer("This", pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, "This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_padding_with_attention_mask(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") if "attention_mask" not in tokenizer.model_input_names: self.skipTest("This model does not use attention mask.") features = [ {"input_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]}, {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 0]}, ] padded_features = tokenizer.pad(features) if tokenizer.padding_side == "right": self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]]) else: self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]]) def test_encode_plus_with_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequence = "Sequence" # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_size = 10 padding_idx = tokenizer.pad_token_id token_type_padding_idx = tokenizer.pad_token_type_id encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True) input_ids = encoded_sequence["input_ids"] special_tokens_mask = encoded_sequence["special_tokens_mask"] sequence_length = len(input_ids) # Test 'longest' and 'no_padding' don't do anything tokenizer.padding_side = "right" not_padded_sequence = tokenizer.encode_plus( sequence, padding=True, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertEqual(sequence_length, not_padded_sequence_length) self.assertEqual(input_ids, not_padded_input_ids) self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask) not_padded_sequence = tokenizer.encode_plus( sequence, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertEqual(sequence_length, not_padded_sequence_length) self.assertEqual(input_ids, not_padded_input_ids) self.assertEqual(special_tokens_mask, not_padded_special_tokens_mask) # Test right padding tokenizer.padding_side = "right" right_padded_sequence = tokenizer.encode_plus( sequence, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) right_padded_input_ids = right_padded_sequence["input_ids"] right_padded_special_tokens_mask = 
right_padded_sequence["special_tokens_mask"] right_padded_sequence_length = len(right_padded_input_ids) self.assertEqual(sequence_length + padding_size, right_padded_sequence_length) self.assertEqual(input_ids + [padding_idx] * padding_size, right_padded_input_ids) self.assertEqual(special_tokens_mask + [1] * padding_size, right_padded_special_tokens_mask) # Test left padding tokenizer.padding_side = "left" left_padded_sequence = tokenizer.encode_plus( sequence, max_length=sequence_length + padding_size, padding="max_length", return_special_tokens_mask=True, ) left_padded_input_ids = left_padded_sequence["input_ids"] left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"] left_padded_sequence_length = len(left_padded_input_ids) self.assertEqual(sequence_length + padding_size, left_padded_sequence_length) self.assertEqual([padding_idx] * padding_size + input_ids, left_padded_input_ids) self.assertEqual([1] * padding_size + special_tokens_mask, left_padded_special_tokens_mask) if "token_type_ids" in tokenizer.model_input_names: token_type_ids = encoded_sequence["token_type_ids"] left_padded_token_type_ids = left_padded_sequence["token_type_ids"] right_padded_token_type_ids = right_padded_sequence["token_type_ids"] self.assertEqual( token_type_ids + [token_type_padding_idx] * padding_size, right_padded_token_type_ids ) self.assertEqual( [token_type_padding_idx] * padding_size + token_type_ids, left_padded_token_type_ids ) if "attention_mask" in tokenizer.model_input_names: attention_mask = encoded_sequence["attention_mask"] right_padded_attention_mask = right_padded_sequence["attention_mask"] left_padded_attention_mask = left_padded_sequence["attention_mask"] self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask) self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask) def test_padding_warning_message_fast_tokenizer(self): if not self.test_rust_tokenizer: return sequence = "This is a text" tokenizer_fast = self.get_rust_tokenizer() # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer_fast, sequence) encoding_fast = tokenizer_fast(sequence) with self.assertLogs("transformers", level="WARNING") as cm: tokenizer_fast.pad(encoding_fast) self.assertEqual(len(cm.records), 1) self.assertIn( "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" " encode the text followed by a call to the `pad` method to get a padded encoding.", cm.records[0].message, ) if not self.test_slow_tokenizer: return tokenizer_slow = self.get_tokenizer() # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer_slow, sequence) encoding_slow = tokenizer_slow(sequence) with self.assertLogs(level="WARNING") as cm: # We want to assert there are no warnings, but the 'assertLogs' method does not support that. # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. logger.warning("Dummy warning") tokenizer_slow.pad(encoding_slow) self.assertEqual(len(cm.records), 1) self.assertIn( "Dummy warning", cm.records[0].message, ) def test_separate_tokenizers(self): # This tests that tokenizers don't impact others. Unfortunately the case where it fails is when # we're loading an S3 configuration from a pre-trained identifier, and we have no way of testing those today. 
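# (Loosely speaking, init_kwargs records the keyword arguments each tokenizer instance was created
# with, so two instances built with different kwargs should not see each other's values.)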
tokenizers = self.get_tokenizers(random_argument=True) new_tokenizers = self.get_tokenizers(random_argument=False) for tokenizer, new_tokenizer in zip(tokenizers, new_tokenizers): with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertTrue(tokenizer.init_kwargs["random_argument"]) self.assertTrue(tokenizer.init_kwargs["random_argument"]) self.assertFalse(new_tokenizer.init_kwargs["random_argument"]) def test_get_vocab(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_dict = tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(tokenizer), len(vocab_dict)) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))] self.assertEqual(len(vocab), len(tokenizer)) def test_conversion_reversible(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab = tokenizer.get_vocab() for word, ind in vocab.items(): if word == tokenizer.unk_token: continue self.assertEqual(tokenizer.convert_tokens_to_ids(word), ind) self.assertEqual(tokenizer.convert_ids_to_tokens(ind), word) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] # Test not batched encoded_sequences_1 = tokenizer.encode_plus(sequences[0]) encoded_sequences_2 = tokenizer(sequences[0]) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test not batched pairs encoded_sequences_1 = tokenizer.encode_plus(sequences[0], sequences[1]) encoded_sequences_2 = tokenizer(sequences[0], sequences[1]) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched encoded_sequences_1 = tokenizer.batch_encode_plus(sequences) encoded_sequences_2 = tokenizer(sequences) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched pairs encoded_sequences_1 = tokenizer.batch_encode_plus(list(zip(sequences, sequences))) encoded_sequences_2 = tokenizer(sequences, sequences) self.assertEqual(encoded_sequences_1, encoded_sequences_2) def test_batch_encode_plus_batch_sequence_length(self): # Tests that all encoded values have the correct size tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences] encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) maximum_length = len( max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) ) # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) 
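# What follows compares per-sequence encode_plus padded to the batch maximum against
# batch_encode_plus(..., padding=True), i.e. 'longest'-style batch padding; the two should agree.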
encoded_sequences_padded = [ tokenizer.encode_plus(sequence, max_length=maximum_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True) self.assertListEqual( encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), ) # check 'longest' is insensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( sequences, max_length=maximum_length + 10, padding="longest" ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) # check 'no_padding' is insensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( sequences, max_length=maximum_length + 10, padding=False ) for key in encoded_sequences_batch_padded_1.keys(): self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) @require_tokenizers def test_added_token_are_matched_longest_first(self): if not self.test_slow_tokenizer: self.skipTest("This test is only for slow tokenizers") return tokenizers = self.get_tokenizers(fast=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): try: tokenizer.add_tokens([AddedToken("extra_id_1")]) tokenizer.add_tokens([AddedToken("extra_id_100")]) except Exception: # Canine cannot add tokens which are not codepoints self.skipTest("Cannot add those Added tokens") # XXX: This used to split on `extra_id_1` first; we're matching # longest first now. tokens = tokenizer.tokenize("This is some extra_id_100") self.assertIn("extra_id_100", tokens) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.add_tokens([AddedToken("extra_id_100")]) tokenizer.add_tokens([AddedToken("extra_id_1")]) tokens = tokenizer.tokenize("This is some extra_id_100") self.assertIn("extra_id_100", tokens) @require_tokenizers def test_added_token_serializable(self): # TODO this is tested 10_000 times....
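# (Informal expectation: the lstrip/rstrip/normalized flags carried by an AddedToken should survive
# the save_pretrained / from_pretrained round-trip below; the test itself only checks that the
# round-trip does not error.)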
tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): new_token = AddedToken("new_token", lstrip=True) tokenizer.add_tokens([new_token]) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(tmp_dir_name) tokenizer.from_pretrained(tmp_dir_name) def test_batch_encode_plus_padding(self): # Test that padded sequences are equivalent between batch_encode_plus and encode_plus # Right padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) encoded_sequences = [ tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch = tokenizer.batch_encode_plus( sequences, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) # Left padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.padding_side = "left" sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequences) encoded_sequences = [ tokenizer.encode_plus(sequence, max_length=max_length, padding="max_length") for sequence in sequences ] encoded_sequences_batch = tokenizer.batch_encode_plus( sequences, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) def test_pretokenized_inputs(self): # Test when inputs are pretokenized tokenizers = self.get_tokenizers(do_lower_case=False) # , add_prefix_space=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if hasattr(tokenizer, "add_prefix_space") and not tokenizer.add_prefix_space: continue # Prepare a sequence from our tokenizer vocabulary sequence, ids = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20) # sequence = " " + sequence # To be sure the byte-level tokenizers are feeling good token_sequence = sequence.split() # sequence_no_prefix_space = sequence.strip() # Test encode for pretokenized inputs output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False) output_sequence = tokenizer.encode(sequence, add_special_tokens=False) self.assertEqual(output, output_sequence) output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True) output_sequence = tokenizer.encode(sequence, add_special_tokens=True) self.assertEqual(output, output_sequence) # Test encode_plus for pretokenized inputs output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False) output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False) for key in output.keys(): 
self.assertEqual(output[key], output_sequence[key]) output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True) output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test batch_encode_plus for pretokenized inputs sequence_batch = [sequence.strip()] * 2 + [sequence.strip() + " " + sequence.strip()] token_sequence_batch = [s.split() for s in sequence_batch] sequence_batch_cleaned_up_spaces = [" " + " ".join(s) for s in token_sequence_batch] output = tokenizer.batch_encode_plus( token_sequence_batch, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.batch_encode_plus( sequence_batch_cleaned_up_spaces, add_special_tokens=False ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.batch_encode_plus( token_sequence_batch, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.batch_encode_plus( sequence_batch_cleaned_up_spaces, add_special_tokens=True ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test encode for pretokenized inputs pairs output = tokenizer.encode( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False) self.assertEqual(output, output_sequence) output = tokenizer.encode( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True) self.assertEqual(output, output_sequence) # Test encode_plus for pretokenized inputs pairs output = tokenizer.encode_plus( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.encode_plus( token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) # Test batch_encode_plus for pretokenized inputs pairs sequence_pair_batch = [(sequence.strip(), sequence.strip())] * 2 + [ (sequence.strip() + " " + sequence.strip(), sequence.strip()) ] token_sequence_pair_batch = [tuple(s.split() for s in pair) for pair in sequence_pair_batch] sequence_pair_batch_cleaned_up_spaces = [ tuple(" " + " ".join(s) for s in pair) for pair in token_sequence_pair_batch ] output = tokenizer.batch_encode_plus( token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False ) output_sequence = tokenizer.batch_encode_plus( sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) output = tokenizer.batch_encode_plus( token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True ) output_sequence = tokenizer.batch_encode_plus( sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True ) for key in output.keys(): self.assertEqual(output[key], output_sequence[key]) def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): string_sequence = "Testing the prepare_for_model method." 
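# Background (stated loosely): prepare_for_model takes already-converted ids and applies the same
# post-processing as encode_plus (special tokens, truncation/padding, attention mask), so the two
# paths should produce the same dictionary.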
ids = tokenizer.encode(string_sequence, add_special_tokens=False) prepared_input_dict = tokenizer.prepare_for_model(ids, add_special_tokens=True) input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict) def test_batch_encode_plus_overflowing_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: string_sequences = ["Testing the prepare_for_model method.", "Test"] if tokenizer.pad_token is None: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) tokenizer.batch_encode_plus( string_sequences, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3 ) @is_pt_tf_cross_test def test_batch_encode_plus_tensors(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): sequences = [ "Testing batch encode plus", "Testing batch encode plus with different sequence lengths", "Testing batch encode plus with different sequence lengths correctly pads", ] # A Tensor cannot be build by sequences which are not the same size self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="pt") self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="tf") if tokenizer.pad_token_id is None: self.assertRaises( ValueError, tokenizer.batch_encode_plus, sequences, padding=True, return_tensors="pt", ) self.assertRaises( ValueError, tokenizer.batch_encode_plus, sequences, padding="longest", return_tensors="tf", ) else: pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors="pt") tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding="longest", return_tensors="tf") encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True) for key in encoded_sequences.keys(): pytorch_value = pytorch_tensor[key].tolist() tensorflow_value = tensorflow_tensor[key].numpy().tolist() encoded_value = encoded_sequences[key] self.assertEqual(pytorch_value, tensorflow_value, encoded_value) def _check_no_pad_token_padding(self, tokenizer, sequences): # if tokenizer does not have pad_token_id, an error should be thrown if tokenizer.pad_token_id is None: with self.assertRaises(ValueError): if isinstance(sequences, list): tokenizer.batch_encode_plus(sequences, padding="longest") else: tokenizer.encode_plus(sequences, padding=True) # add pad_token_id to pass subsequent tests tokenizer.add_special_tokens({"pad_token": "<PAD>"}) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") if is_using_common_embeddings: self.assertGreaterEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer)) # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) 
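                # Encode the sequence (single and batched) and run a forward pass to check the tokenizer output is model-compatible.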
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="pt") # Ensure that the BatchEncoding.to() method works. encoded_sequence.to(model.device) batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") # This should not fail with torch.no_grad(): # saves some time model(**encoded_sequence) model(**batch_encoded_sequence) # if self.test_rust_tokenizer: # fast_tokenizer = self.get_rust_tokenizer() # encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="pt") # batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") # # This should not fail # model(**encoded_sequence_fast) # model(**batch_encoded_sequence_fast) @require_tf @slow def test_tf_encode_plus_sent_to_model(self): from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix self.assertGreaterEqual(model.config.vocab_size, len(tokenizer)) # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="tf") batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="tf") # This should not fail model(encoded_sequence) model(batch_encoded_sequence) # TODO: Check if require_torch is the best to test for numpy here ... Maybe move to require_flax when available @require_torch @slow def test_np_encode_plus_sent_to_model(self): from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: return config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: return # Build sequence first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = tokenizer.encode_plus(sequence, return_tensors="np") batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors="np") # TODO: add forward through JAX/Flax when PR is merged # This is currently here to make ruff happy ! if encoded_sequence is None: raise ValueError("Cannot convert list to numpy tensor on encode_plus()") if batch_encoded_sequence is None: raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus()") if self.test_rust_tokenizer: fast_tokenizer = self.get_rust_tokenizer() encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors="np") batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus( [sequence, sequence], return_tensors="np" ) # TODO: add forward through JAX/Flax when PR is merged # This is currently here to make ruff happy ! 
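                    # As for the slow tokenizer above, only the numpy conversion is checked here until a JAX/Flax forward pass is added.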
if encoded_sequence_fast is None: raise ValueError("Cannot convert list to numpy tensor on encode_plus() (fast)") if batch_encoded_sequence_fast is None: raise ValueError("Cannot convert list to numpy tensor on batch_encode_plus() (fast)") @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: return tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="en_XX", # this should be ignored (for all but mbart) but not cause an error ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 3) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 3) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3) self.assertNotIn("decoder_input_ids", batch_encoder_only) def test_is_fast(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check is_fast is set correctly self.assertTrue(tokenizer_r.is_fast) if self.test_slow_tokenizer: tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertFalse(tokenizer_p.is_fast) def test_fast_only_inputs(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Ensure None raise an error self.assertRaises(TypeError, tokenizer_r.tokenize, None) self.assertRaises(TypeError, tokenizer_r.encode, None) self.assertRaises(TypeError, tokenizer_r.encode_plus, None) self.assertRaises(TypeError, tokenizer_r.batch_encode_plus, None) def test_alignement_methods(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) batch_size = 3 encoding = tokenizer_r.encode_plus(text, add_special_tokens=False) 
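                # Exercise the alignment helpers (words/tokens/chars/sequence mappings) on both the single and the batched encoding.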
batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # words, tokens self.assertEqual(len(encoding.words(0)), num_tokens) self.assertEqual(max(encoding.words(0)), last_word_index) self.assertEqual(min(encoding.words(0)), 0) self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens) self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index) self.assertEqual(min(batch_encoding.words(last_batch_index)), 0) self.assertEqual(len(encoding.tokens(0)), num_tokens) # Assert token_to_word self.assertEqual(encoding.token_to_word(0), 0) self.assertEqual(encoding.token_to_word(0, 0), 0) self.assertEqual(encoding.token_to_word(last_token_index), last_word_index) self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(1, 0), 0) self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index) # Assert word_to_tokens self.assertEqual(encoding.word_to_tokens(0).start, 0) self.assertEqual(encoding.word_to_tokens(0, 0).start, 0) self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1) self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual( batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1 ) # Assert token_to_chars self.assertEqual(encoding.token_to_chars(0).start, 0) self.assertEqual(encoding.token_to_chars(0, 0).start, 0) self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1) self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual( batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1 ) # Assert char_to_token self.assertEqual(encoding.char_to_token(0), 0) self.assertEqual(encoding.char_to_token(0, 0), 0) self.assertEqual(encoding.char_to_token(last_char_index), last_token_index) self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(1, 0), 0) self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index) # Assert char_to_word self.assertEqual(encoding.char_to_word(0), 0) self.assertEqual(encoding.char_to_word(0, 0), 0) self.assertEqual(encoding.char_to_word(last_char_index), last_word_index) self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(1, 0), 0) self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index) # Assert word_to_chars self.assertEqual(encoding.word_to_chars(0).start, 0) self.assertEqual(encoding.word_to_chars(0, 0).start, 0) 
self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1) self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual( batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1 ) # Assert token_to_sequence self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0) self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0) # Pair of input sequences words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) pair_words = ["Amazing", "example", "full", "of", "inspiration"] pair_text = " ".join(pair_words) batch_size = 3 index_word_in_first_seq = words.index("inspiration") index_word_in_pair_seq = pair_words.index("inspiration") index_char_in_first_seq = text.find("inspiration") index_char_in_pair_seq = pair_text.find("inspiration") pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False) pair_batch_encoding = tokenizer_r.batch_encode_plus( [(text, pair_text)] * batch_size, add_special_tokens=False ) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # Assert word_to_tokens self.assertNotEqual( pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start ], pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start ], ) self.assertNotEqual( pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start ], ) # Assert char_to_token self.assertNotEqual( pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)], pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)], ) self.assertNotEqual( pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0) ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1) ], ) # Assert char_to_word self.assertNotEqual( pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0), 
pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)], pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)], ) self.assertNotEqual( pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)], pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)], ) # Assert word_to_chars self.assertNotEqual( pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start], pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start], ) self.assertNotEqual( pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start], pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start], ) # Assert token_to_sequence pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True) pair_sequence_ids = [ pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"])) ] self.assertIn(0, pair_sequence_ids) self.assertIn(1, pair_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_sequence_ids) pair_batch_encoding = tokenizer_r.batch_encode_plus( [(text, pair_text)] * batch_size, add_special_tokens=True ) pair_batch_sequence_ids = [ pair_batch_encoding.token_to_sequence(1, i) for i in range(len(pair_batch_encoding["input_ids"][0])) ] self.assertIn(0, pair_batch_sequence_ids) self.assertIn(1, pair_batch_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_batch_sequence_ids) def test_tokenization_python_rust_equals(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Ensure basic input match input_p = tokenizer_p.encode_plus(self._data) input_r = tokenizer_r.encode_plus(self._data) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key]) input_pairs_p = tokenizer_p.encode_plus(self._data, self._data) input_pairs_r = tokenizer_r.encode_plus(self._data, self._data) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key]) # Ensure truncation match input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True) input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], 
input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key]) # Ensure truncation with stride match input_p = tokenizer_p.encode_plus( self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) input_r = tokenizer_r.encode_plus( self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True ) for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()): self.assertSequenceEqual(input_p[key], input_r[key][0]) def test_num_special_tokens_to_add_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual( tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False) ) self.assertEqual( tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True) ) def test_max_length_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Check we have the correct max_length for both pair and non-pair inputs. self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence) self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair) def test_special_tokens_map_equal(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # sometimes the tokenizer saved online is not the same tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Assert the set of special tokens match. 
self.assertSequenceEqual( tokenizer_p.special_tokens_map.items(), tokenizer_r.special_tokens_map.items(), ) def test_add_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) vocab_size = len(tokenizer_r) self.assertEqual(tokenizer_r.add_tokens(""), 0) self.assertEqual(tokenizer_r.add_tokens("testoken"), 1) self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2) self.assertEqual(len(tokenizer_r), vocab_size + 3) self.assertEqual(tokenizer_r.add_special_tokens({}), 0) self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2) self.assertRaises( AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"} ) self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1) self.assertEqual( tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2 ) self.assertIn("<testtoken3>", tokenizer_r.special_tokens_map["additional_special_tokens"]) self.assertIsInstance(tokenizer_r.special_tokens_map["additional_special_tokens"], list) self.assertGreaterEqual(len(tokenizer_r.special_tokens_map["additional_special_tokens"]), 2) self.assertEqual(len(tokenizer_r), vocab_size + 8) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = "Wonderful no inspiration example with subtoken" pair = "Along with an awesome pair" # No pair tokens_with_offsets = tokenizer_r.encode_plus( text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) # Pairs tokens_with_offsets = tokenizer_r.encode_plus( text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) def test_batch_encode_dynamic_overflowing(self): """ When calling batch_encode with multiple sequence it can returns different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... 
Encoding N] ] This needs to be padded so that it can represented as a tensor """ for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( "HuggingFace is solving NLP one commit at a time", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time", "Very tiny input"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) def test_compare_pretokenized_inputs(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) if hasattr(tokenizer_p, "add_prefix_space") and not tokenizer_p.add_prefix_space: continue # Too hard to test for now # Input string pretokenized_input_simple = "This is a sample input".split() pretokenized_input_pair = "This is a sample pair".split() # Test encode for pretokenized inputs output_r = tokenizer_r.encode( pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False ) output_p = tokenizer_p.encode( pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False ) self.assertEqual(output_p, output_r) kwargs = { "is_split_into_words": True, # "return_token_type_ids": True, # Use the defaults for each tokenizers # "return_attention_mask": True, # Use the defaults for each tokenizers "return_overflowing_tokens": False, "return_special_tokens_mask": True, "return_offsets_mapping": False, # Not implemented in python tokenizers # "add_special_tokens": False, } batch_kwargs = { "is_split_into_words": True, # "return_token_type_ids": True, # Use the defaults for each tokenizers # "return_attention_mask": True, # Use the defaults for each tokenizers "return_overflowing_tokens": False, "return_special_tokens_mask": True, "return_offsets_mapping": False, # Not implemented in python tokenizers # "add_special_tokens": False, } # Test encode_plus for pretokenized inputs output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs) 
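                # The slow (python) tokenizer output computed below must match the fast (rust) output key by key.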
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test batch_encode_plus for pretokenized inputs input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair] output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs) output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test encode for pretokenized inputs pairs output_r = tokenizer_r.encode( pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True ) output_p = tokenizer_p.encode( pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True ) self.assertEqual(output_p, output_r) # Test encode_plus for pretokenized inputs output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs) output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) # Test batch_encode_plus for pretokenized inputs input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [ pretokenized_input_simple + pretokenized_input_pair, pretokenized_input_pair, ] output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs) output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs) for key in output_p.keys(): self.assertEqual(output_p[key], output_r[key]) def test_create_token_type_ids(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) input_simple = [1, 2, 3] input_pair = [1, 2, 3] # Generate output output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple) output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple) self.assertEqual(output_p, output_r) # Generate pair output output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair) output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_build_inputs_with_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) # # Input string # input_simple = tokenizer_p.tokenize("This is a sample input", add_special_tokens=False) # input_pair = tokenizer_p.tokenize("This is a sample pair", add_special_tokens=False) # # Generate output # output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) # output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) # self.assertEqual(output_p, output_r) # # Generate pair output # output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) # output_p = 
tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) # self.assertEqual(output_p, output_r) input_pairs = [ ("", ""), ("", "This is a sample pair"), ("This is a sample input", ""), ("This is a sample input", "This is a sample pair"), ] for sample_input, sample_pair in input_pairs: # Input tokens id input_simple = tokenizer_p.encode(sample_input, add_special_tokens=False) input_pair = tokenizer_p.encode(sample_pair, add_special_tokens=False) # Generate output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) self.assertEqual(output_p, output_r) # Generate pair output output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) self.assertEqual(output_p, output_r) def test_padding(self, max_length=50): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id # Encode - Simple input input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length") input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", padding="longest") input_p = tokenizer_p.encode("This is a simple input", padding=True) self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode - Pair input input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True) input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest") self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode_plus - Simple input input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) 
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( "This is a simple input", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Encode_plus - Pair input input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( "This is a simple input", "This is a pair", max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest") input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Batch_encode_plus - Simple input input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True, ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest", ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True, ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding="longest" ) input_p = tokenizer_p.batch_encode_plus( ["This is a simple input 1", "This is a simple input 2"], padding=True ) self.assert_batch_padded_input_match(input_r, input_p, 
len(input_r["input_ids"][0]), pad_token_id) # Batch_encode_plus - Pair input input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], max_length=max_length, truncation=True, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding=True, ) input_p = tokenizer_p.batch_encode_plus( [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ], padding="longest", ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus("This is a input 1") input_r = tokenizer_r.pad(input_r) input_p = tokenizer_p.encode_plus("This is a input 1") input_p = tokenizer_p.pad(input_p) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus("This is a input 1") input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode_plus("This is a input 1") input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) # Using pad after tokenization input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_p.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_p.pad(input_p) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad after tokenization input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_p.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_p.pad(input_p, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) # Test padding nested empty lists (in some use-cases, there is no any token id in the `input_ids` list). 
input_r = tokenizer_r.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length") input_p = tokenizer_p.pad({"input_ids": [[], []]}, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) def test_padding_different_model_input_name(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id input_r = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) input_p = tokenizer_r.batch_encode_plus( ["This is a input 1", "This is a much longer input whilch should be padded"] ) # rename encoded batch to "inputs" input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]] del input_r[tokenizer_r.model_input_names[0]] input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]] del input_p[tokenizer_p.model_input_names[0]] # Renaming `input_ids` to `inputs` tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:] tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:] input_r = tokenizer_r.pad(input_r, padding="longest") input_p = tokenizer_r.pad(input_p, padding="longest") max_length = len(input_p["inputs"][0]) self.assert_batch_padded_input_match( input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs" ) def test_save_pretrained(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # make sure that all ".json" files are saved in the correct format for file_path in tokenizer_r_files + tokenizer_p_files: if os.path.exists(file_path) and file_path.endswith(".json"): check_json_file_has_correct_format(file_path) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=True tmpdirname2 = tempfile.mkdtemp() 
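                # With legacy_format=True, the fast tokenizer is expected to save the same set of files as the slow one (no tokenizer.json).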
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=False tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) def test_embeded_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus( sentence, add_special_tokens=True, ) tokens_p = tokenizer_p.encode_plus( sentence, add_special_tokens=True, ) for key in tokens_p.keys(): self.assertEqual(tokens_r[key], tokens_p[key]) if "token_type_ids" in tokens_r: self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_r, tokens_p) def test_compare_add_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) # pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True) for text in ["", " "]: # tokenize() no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False) with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True) self.assertEqual( len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add ) # encode() no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False) with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True) self.assertEqual( len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add ) # encode_plus() no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False) with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True) for key in no_special_tokens.keys(): self.assertEqual( len(no_special_tokens[key]), len(with_special_tokens[key]) - 
simple_num_special_tokens_to_add, ) # # batch_encode_plus no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False) with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True) for key in no_special_tokens.keys(): for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) def test_compare_prepare_for_model(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) string_sequence = "Asserting that both tokenizers are equal" python_output = tokenizer_p.prepare_for_model( tokenizer_p.encode(string_sequence, add_special_tokens=False) ) rust_output = tokenizer_r.prepare_for_model( tokenizer_r.encode(string_sequence, add_special_tokens=False) ) for key in python_output: self.assertEqual(python_output[key], rust_output[key]) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: # in rust fast, you lose the information of the AddedToken when initializing with `additional_special_tokens` tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): # This test no longer support rust tokenizers, because the only file that should be looked # at by the fast tokenizer with the new saving format is `tokenizer_config.json`. # The previous behaviour is very strange too. Fast tokenizer should not save 3 files, but just one. Can never do slow from fast. 
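        # For that reason, only the slow tokenizer is exercised below.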
tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) # only legacy save will check this tokenizer_path = "tokenizer_config.json" with open(os.path.join(tmp_dir, tokenizer_path), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) tokenizer_config["additional_special_tokens"] = ["an_additional_special_token"] with open(os.path.join(tmp_dir, tokenizer_path), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files # TODO ArthurZ ... Ok so for legacy we have to support this I guess..... (special_tokens_map + additional) tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() # Test with a special tokens map class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) # Create a new mapping from the special tokens defined in the original tokenizer special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_list.remove("additional_special_tokens") special_tokens_map = {} for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" # Train new tokenizer new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) # Check the changes for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, f"_{token}") is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) # Check if the AddedToken / string format has been kept for special_token in tokenizer.all_special_tokens_extended: if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", ) elif isinstance(special_token, AddedToken): # The special token must appear in the list of the new tokenizer as an object of type AddedToken with # the same parameters as the old AddedToken except the content that the user has requested to change. 
special_token_str = special_token.content new_special_token_str = special_tokens_map[special_token_str] find = False for candidate in new_tokenizer.all_special_tokens_extended: if ( isinstance(candidate, AddedToken) and candidate.content == new_special_token_str and candidate.lstrip == special_token.lstrip and candidate.rstrip == special_token.rstrip and candidate.normalized == special_token.normalized and candidate.single_word == special_token.single_word ): find = True break special_token.content = new_special_token_str self.assertTrue( find, f"'{special_token.__repr__()}' should appear as an `AddedToken` in the all_special_tokens_extended = " f"{[k for k in new_tokenizer.all_special_tokens_extended if str(k)==new_special_token_str]} but it is missing" ", this means that the new tokenizers did not keep the `rstrip`, `lstrip`, `normalized` etc attributes.", ) elif special_token not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens_extended, f"'{special_token.__repr__()}' should be in {new_tokenizer.all_special_tokens_extended}", ) else: # The special token must appear in the list of the new tokenizer as an object of type string. self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) def test_tokenizer_mismatch_warning(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with self.assertLogs("transformers", level="WARNING") as cm: try: if self.tokenizer_class == BertTokenizer: AlbertTokenizer.from_pretrained(pretrained_name) else: BertTokenizer.from_pretrained(pretrained_name) except EnvironmentError as e: # Some tokenizer will raised an error before reaching the logged warning because there are no # corresponding files to load error_message = str(e) except (TypeError, AttributeError): # Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned, # here we just check that the warning has been logged before the error is raised pass finally: logged_msg_target = ( "The tokenizer class you load from this checkpoint is not the same type as the class " "this function is called from." 
) raised_error_msg_target = "Can't load tokenizer for" self.assertTrue( cm.records[0].message.startswith(logged_msg_target) if len(cm.records) > 0 else False or raised_error_msg_target in error_message ) try: if self.rust_tokenizer_class == BertTokenizerFast: AlbertTokenizerFast.from_pretrained(pretrained_name) else: BertTokenizerFast.from_pretrained(pretrained_name) except (TypeError, AttributeError): # Some tokenizers cannot be loaded into the target tokenizer at all and errors are returned, # here we just check that the warning has been logged before the error is raised pass finally: self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class" " this function is called from." ) ) @require_torch def test_saving_tokenizer_trainer(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with tempfile.TemporaryDirectory() as tmp_dir: # Save the fast tokenizer files in a temporary directory tokenizer_old = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs, use_fast=True) tokenizer_old.save_pretrained(tmp_dir, legacy_format=False) # save only fast version # Initialize toy model for the trainer model = nn.Module() # Load tokenizer from a folder without legacy files tokenizer = self.rust_tokenizer_class.from_pretrained(tmp_dir) training_args = TrainingArguments(output_dir=tmp_dir, do_train=True, no_cuda=True) trainer = Trainer(model=model, args=training_args, tokenizer=tokenizer) # Should not raise an error trainer.save_model(os.path.join(tmp_dir, "checkpoint")) self.assertIn("tokenizer.json", os.listdir(os.path.join(tmp_dir, "checkpoint"))) def test_convert_tokens_to_string_format(self): tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["this", "is", "a", "test"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str) def test_save_slow_from_fast_and_reload_fast(self): if not self.test_slow_tokenizer or not self.test_rust_tokenizer: # we need both slow and fast versions return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): with tempfile.TemporaryDirectory() as tmp_dir_1: # Here we check that even if we have initialized a fast tokenizer with a tokenizer_file we can # still save only the slow version and use these saved files to rebuild a tokenizer tokenizer_fast_old_1 = self.rust_tokenizer_class.from_pretrained( pretrained_name, **kwargs, use_fast=True ) tokenizer_file = os.path.join(tmp_dir_1, "tokenizer.json") tokenizer_fast_old_1.backend_tokenizer.save(tokenizer_file) tokenizer_fast_old_2 = self.rust_tokenizer_class.from_pretrained( pretrained_name, **kwargs, use_fast=True, tokenizer_file=tokenizer_file ) tokenizer_fast_old_2.save_pretrained(tmp_dir_1, legacy_format=True) # save only slow version tokenizer_slow = self.tokenizer_class.from_pretrained(tmp_dir_1) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer_slow.save_pretrained(tmp_dir_2) # Should not raise an error self.rust_tokenizer_class.from_pretrained(tmp_dir_2) # TODO This is ran for all models but only tests bert... def test_clean_up_tokenization_spaces(self): tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") assert tokenizer.clean_up_tokenization_spaces is True tokens = tokenizer.encode("This shouldn't be! 
He'll go.") decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]" tokenizer.clean_up_tokenization_spaces = False decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn ' t be ! he ' ll go . [SEP]" assert decoded == tokenizer.decode(tokens, clean_up_tokenization_spaces=False) # Fast from slow with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) tokenizer_fast = BertTokenizerFast.from_pretrained(tmp_dir_2) del tokenizer assert tokenizer_fast.clean_up_tokenization_spaces is False decoded = tokenizer_fast.decode(tokens) # fast and slow don't have the same output when we don't cleanup # tokenization space. Here `be!` vs `be !` and `go.` vs `go .` assert decoded == "[CLS] this shouldn ' t be! he ' ll go. [SEP]" tokenizer_fast.clean_up_tokenization_spaces = True assert tokenizer_fast.clean_up_tokenization_spaces is True decoded = tokenizer_fast.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]" # Slow from fast with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer_fast.clean_up_tokenization_spaces = False tokenizer_fast.save_pretrained(tmp_dir_2) tokenizer = BertTokenizer.from_pretrained(tmp_dir_2) assert tokenizer.clean_up_tokenization_spaces is False decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn ' t be ! he ' ll go . [SEP]" tokenizer.clean_up_tokenization_spaces = True decoded = tokenizer.decode(tokens) assert decoded == "[CLS] this shouldn't be! he'll go. [SEP]" def test_split_special_tokens(self): if not self.test_slow_tokenizer: return for tokenizer, pretrained_name, kwargs in self.tokenizers_list: special_token = "[SPECIAL_TOKEN]" with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) if not tokenizer.is_fast: # bloom, gptneox etc only have a fast tokenizer.add_special_tokens( { "additional_special_tokens": [ AddedToken(special_token, rstrip=True, lstrip=True, normalized=True, special=True) ] } ) encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False) self.assertEqual(len(encoded_special_token), 1) encoded_split_special_token = tokenizer.encode( special_token, add_special_tokens=False, split_special_tokens=True ) if len(encoded_split_special_token) == 1: # if we have subword tokenization or special vocab self.assertTrue( encoded_split_special_token[0] != tokenizer.convert_tokens_to_ids(special_token) ) else: self.assertTrue(len(encoded_split_special_token) > 1) def test_added_tokens_serialization(self): # Utility to test the added vocab def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir): tokenizer = tokenizer_class.from_pretrained(temp_dir) self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens) self.assertIn(new_eos, tokenizer.added_tokens_decoder.values()) self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos) self.assertDictEqual(expected, tokenizer.added_tokens_decoder) return tokenizer new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # Load a slow tokenizer from the hub, init with the new token for fast to also include it tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos) EXPECTED_ADDED_TOKENS_DECODER = 
tokenizer.added_tokens_decoder with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"): self.assertEqual(tokenizer._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values())) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) with self.subTest( "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2 ) if self.rust_tokenizer_class is not None: with self.subTest( "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class" ): tokenizer_fast = _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2 ) with tempfile.TemporaryDirectory() as tmp_dir_3: tokenizer_fast.save_pretrained(tmp_dir_3) with self.subTest( "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest( "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub)"): if self.rust_tokenizer_class is not None: tokenizer_fast = self.rust_tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos) self.assertEqual(tokenizer_fast._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values())) # We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. 
Will comment once normalization is alright with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"): self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder) EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder with tempfile.TemporaryDirectory() as tmp_dir_4: tokenizer_fast.save_pretrained(tmp_dir_4) with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4 ) with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4 ) def test_special_token_addition(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # Create tokenizer and add an additional special token tokenizer_1 = tokenizer.from_pretrained(pretrained_name) tokenizer_1.add_special_tokens({"additional_special_tokens": ["<tok>"]}) self.assertEqual(tokenizer_1.additional_special_tokens, ["<tok>"]) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_1.save_pretrained(tmp_dir) # Load the above tokenizer and add the same special token a second time tokenizer_2 = tokenizer.from_pretrained(pretrained_name) tokenizer_2.add_special_tokens({"additional_special_tokens": ["<tok>"]}) self.assertEqual(tokenizer_2.additional_special_tokens, ["<tok>"]) tokenizer_2.add_special_tokens({"additional_special_tokens": ["<tok>", "<other>"]}) self.assertEqual(tokenizer_2.additional_special_tokens, ["<tok>", "<other>"]) tokenizer_2.add_special_tokens({"additional_special_tokens": ["<other>", "<another>"]}) self.assertEqual(tokenizer_2.additional_special_tokens, ["<other>", "<another>"]) tokenizer_2.add_special_tokens( {"additional_special_tokens": ["<tok>"]}, replace_additional_special_tokens=False, ) self.assertEqual(tokenizer_2.additional_special_tokens, ["<other>", "<another>", "<tok>"])
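# Illustrative sketch, not part of the original test suite: a standalone example of the
# `replace_additional_special_tokens` behaviour exercised in `test_special_token_addition`
# above. It assumes the "bert-base-uncased" checkpoint can be downloaded; the checkpoint
# choice is arbitrary and any slow tokenizer would do.
def _demo_additional_special_tokens_replacement():
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # Default behaviour: each call replaces the previously set additional special tokens.
    tokenizer.add_special_tokens({"additional_special_tokens": ["<tok>"]})
    assert tokenizer.additional_special_tokens == ["<tok>"]
    tokenizer.add_special_tokens({"additional_special_tokens": ["<other>"]})
    assert tokenizer.additional_special_tokens == ["<other>"]

    # With replace_additional_special_tokens=False the new tokens are appended instead.
    tokenizer.add_special_tokens(
        {"additional_special_tokens": ["<tok>"]}, replace_additional_special_tokens=False
    )
    assert tokenizer.additional_special_tokens == ["<other>", "<tok>"]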
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_sequence_feature_extraction_common.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin): # to overwrite at feature extractactor specific tests feat_extract_tester = None feature_extraction_class = None @property def feat_extract_dict(self): return self.feat_extract_tester.prepare_feat_extract_dict() def test_feat_extract_common_properties(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feat_extract, "feature_size")) self.assertTrue(hasattr(feat_extract, "sampling_rate")) self.assertTrue(hasattr(feat_extract, "padding_value")) def test_batch_feature(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_torch def test_batch_feature_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_tf def test_batch_feature_tf(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) def _check_padding(self, numpify=False): def _inputs_have_equal_length(input): 
length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) pad_diff = self.feat_extract_tester.seq_length_diff pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff pad_min_length = self.feat_extract_tester.min_seq_length batch_size = self.feat_extract_tester.batch_size feature_size = self.feat_extract_tester.feature_size # test padding for List[int] + numpy input_1 = feat_extract.pad(processed_features, padding=False) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="longest") input_2 = input_2[input_name] input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1])) input_3 = input_3[input_name] input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np") input_4 = input_4[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length")[input_name] input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np" ) input_5 = input_5[input_name] self.assertFalse(_inputs_have_equal_length(input_1)) self.assertTrue(_inputs_have_equal_length(input_2)) self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(_inputs_are_equal(input_2, input_3)) self.assertTrue(len(input_1[0]) == pad_min_length) self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff) self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0]))) self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10) input_6 = input_6[input_name] input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10) input_7 = input_7[input_name] input_8 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np", ) input_9 = input_9[input_name] self.assertTrue(all(len(x) % 10 == 0 for x in input_6)) self.assertTrue(_inputs_are_equal(input_6, input_7)) expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8)) self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_9.shape[2] == feature_size) # Check padding value is correct padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length 
- pad_min_length)) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3 ) self.assertTrue( abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1e-3 ) def _check_truncation(self, numpify=False): def _inputs_have_equal_length(input): length = len(input[0]) for input_slice in input[1:]: if len(input_slice) != length: return False return True def _inputs_are_equal(input_1, input_2): if len(input_1) != len(input_2): return False for input_slice_1, input_slice_2 in zip(input_1, input_2): if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3): return False return True feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) # truncate to smallest input_1 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True ) input_1 = input_1[input_name] input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0])) input_2 = input_2[input_name] self.assertTrue(_inputs_have_equal_length(input_1)) self.assertFalse(_inputs_have_equal_length(input_2)) # truncate to smallest with np input_3 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True, ) input_3 = input_3[input_name] input_4 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np" ) input_4 = input_4[input_name] self.assertTrue(_inputs_have_equal_length(input_3)) self.assertTrue(input_3.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(input_4)) # truncate to middle input_5 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np", ) input_5 = input_5[input_name] input_6 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True ) input_6 = input_6[input_name] input_7 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np" ) input_7 = input_7[input_name] self.assertTrue(input_5.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(input_5)) self.assertTrue(_inputs_have_equal_length(input_6)) self.assertTrue(_inputs_are_equal(input_5, input_6)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(input_7)) self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, truncation=True)[input_name] # padding has to be max_length when setting 
`truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(ValueError): feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy pad_to_multiple_of = 12 input_8 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, ) input_8 = input_8[input_name] input_9 = feat_extract.pad( processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, ) input_9 = input_9[input_name] # retrieve expected_length as multiple of pad_to_multiple_of expected_length = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_8[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(input_8)) self.assertFalse(_inputs_have_equal_length(input_9)) def test_padding_from_list(self): self._check_padding(numpify=False) def test_padding_from_array(self): self._check_padding(numpify=True) def test_truncation_from_list(self): self._check_truncation(numpify=False) def test_truncation_from_array(self): self._check_truncation(numpify=True) @require_torch def test_padding_accepts_tensors_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) @require_tf def test_padding_accepts_tensors_tf(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) 
self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths) def test_attention_mask_with_truncation(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lengths) processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] )
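# Illustrative sketch, not part of the original mixin: a concrete, standalone example of the
# padding / attention-mask behaviour the tests above exercise, using Wav2Vec2FeatureExtractor
# as an arbitrary sequence feature extractor. The default constructor arguments (1-d features,
# "input_values" as the model input name) are an assumption about that particular class.
def _demo_pad_with_attention_mask():
    import numpy as np

    from transformers import BatchFeature, Wav2Vec2FeatureExtractor

    feat_extract = Wav2Vec2FeatureExtractor(return_attention_mask=True)
    input_name = feat_extract.model_input_names[0]

    # Three 1-d "waveforms" of different lengths.
    speech_inputs = [np.random.rand(length).astype(np.float32) for length in (800, 1000, 1200)]
    processed = feat_extract.pad(
        BatchFeature({input_name: speech_inputs}), padding="longest", return_tensors="np"
    )

    # Everything is padded to the longest input and the mask records the true lengths.
    assert processed[input_name].shape == (3, 1200)
    assert processed["attention_mask"].sum(-1).tolist() == [800, 1000, 1200]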
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_configuration_utils.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPT2Config from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 config_common_kwargs = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-config-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-config") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("test-config", token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="test-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-config", 
push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org", token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_dynamic_config(self): CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) config.push_to_hub("test-dynamic-config", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, "CustomConfig") self.assertEqual(new_config.attribute, 42) class ConfigTestUtils(unittest.TestCase): def test_config_from_string(self): c = GPT2Config() # attempt to modify each of int/float/bool/str config records and verify they were updated n_embd = c.n_embd + 1 # int resid_pdrop = c.resid_pdrop + 1.0 # float scale_attn_weights = not c.scale_attn_weights # bool summary_type = c.summary_type + "foo" # str c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd") self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop") self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights") self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type") def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( missing_keys, [ "is_encoder_decoder", "_name_or_path", "_commit_hash", "_attn_implementation_internal", "transformers_version", ], ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f" {', '.join(keys_with_defaults)}." 
) def test_nested_config_load_from_dict(self): config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-CLIPModel", text_config={"num_hidden_layers": 2} ) self.assertNotIsInstance(config.text_config, dict) self.assertEqual(config.text_config.__class__.__name__, "CLIPTextConfig") def test_from_pretrained_subfolder(self): with self.assertRaises(OSError): # config is in subfolder, the following should not work without specifying the subfolder _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert") self.assertIsNotNone(config) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def test_local_versioning(self): configuration = AutoConfig.from_pretrained("bert-base-cased") configuration.configuration_files = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(tmp_dir) configuration.hidden_size = 2 json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w")) # This should pick the new configuration file as the version of Transformers is > 4.0.0 new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 configuration.configuration_files = ["config.42.0.0.json"] configuration.hidden_size = 768 configuration.save_pretrained(tmp_dir) shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 768) def test_repo_versioning_before(self): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. repo = "hf-internal-testing/test-two-configs" import transformers as new_transformers new_transformers.configuration_utils.__version__ = "v4.0.0" new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained( repo, return_unused_kwargs=True ) self.assertEqual(new_configuration.hidden_size, 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(kwargs, {}) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers old_transformers.configuration_utils.__version__ = "v3.0.0" old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo) self.assertEqual(old_configuration.hidden_size, 768) def test_saving_config_with_custom_generation_kwargs_raises_warning(self): config = BertConfig(min_length=3) # `min_length = 3` is a non-default generation kwarg with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs("transformers.configuration_utils", level="WARNING") as logs: config.save_pretrained(tmp_dir) self.assertEqual(len(logs.output), 1) self.assertIn("min_length", logs.output[0]) def test_has_non_default_generation_parameters(self): config = BertConfig() self.assertFalse(config._has_non_default_generation_parameters()) config = BertConfig(min_length=3) self.assertTrue(config._has_non_default_generation_parameters()) config = BertConfig(min_length=0) # `min_length = 0` is a default generation kwarg self.assertFalse(config._has_non_default_generation_parameters())
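# Illustrative sketch, not part of the original tests: a standalone save/reload round-trip of a
# config, mirroring the equality check used in the push-to-hub tests above but without any
# network access. The specific BertConfig values are arbitrary.
def _demo_config_roundtrip():
    import tempfile

    from transformers import BertConfig

    config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4)

    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)  # writes a config.json into tmp_dir
        reloaded = BertConfig.from_pretrained(tmp_dir)

    # Apart from the serialization-only "transformers_version" entry, the dicts should match.
    original = {k: v for k, v in config.to_dict().items() if k != "transformers_version"}
    restored = {k: v for k, v in reloaded.to_dict().items() if k != "transformers_version"}
    assert original == restored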
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_modeling_flax_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo, snapshot_download from requests.exceptions import HTTPError from transformers import BertConfig, BertModel, is_flax_available, is_torch_available from transformers.testing_utils import ( TOKEN, USER, is_pt_flax_cross_test, is_staging_test, require_flax, require_safetensors, require_torch, ) from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("test-model-flax", token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="test-model-flax") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("valid_org/test-model-flax-org", token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org") # Push to hub via save_pretrained with 
tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) @require_safetensors def test_safetensors_save_and_load(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No msgpack file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_flax @require_torch @is_pt_flax_cross_test def test_safetensors_save_and_load_pt_to_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir) # Check we have a model.safetensors file self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) # Check 
models are equal self.assertTrue(check_models_equal(model, new_model)) @require_safetensors def test_safetensors_load_from_hub(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub """ flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") # Can load from the Flax-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-only") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_local(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-only", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-safetensors-only", cache_dir=tmp) safetensors_model = FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_torch @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_hub_from_safetensors_pt(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. """ flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-msgpack") # Can load from the PyTorch-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_torch @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_local_from_safetensors_pt(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-msgpack", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) # Can load from the PyTorch-formatted checkpoint with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) safetensors_model = FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_hub_from_safetensors_pt_without_torch_installed(self): """ This test checks that we cannot load safetensors from a checkpoint that only has safetensors saved in the "pt" format if torch isn't installed. """ if is_torch_available(): # This test verifies that a correct error message is shown when loading from a pt safetensors # PyTorch shouldn't be installed for this to work correctly. return # Cannot load from the PyTorch-formatted checkpoint without PyTorch installed with self.assertRaises(ModuleNotFoundError): _ = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") @require_safetensors def test_safetensors_load_from_local_from_safetensors_pt_without_torch_installed(self): """ This test checks that we cannot load safetensors from a checkpoint that only has safetensors saved in the "pt" format if torch isn't installed. """ if is_torch_available(): # This test verifies that a correct error message is shown when loading from a pt safetensors # PyTorch shouldn't be installed for this to work correctly. 
return with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) # Cannot load from the PyTorch-formatted checkpoint without PyTorch installed with self.assertRaises(ModuleNotFoundError): _ = FlaxBertModel.from_pretrained(location) @require_safetensors def test_safetensors_load_from_hub_msgpack_before_safetensors(self): """ This test checks that we'll first download msgpack weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") @require_safetensors def test_safetensors_load_from_local_msgpack_before_safetensors(self): """ This test checks that we'll first download msgpack weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) FlaxBertModel.from_pretrained(location) @require_safetensors def test_safetensors_flax_from_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_safetensors @require_torch def test_safetensors_flax_from_torch(self): hub_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(hub_model, new_model)) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_local(self): with tempfile.TemporaryDirectory() as tmp_dir: path = snapshot_download( "hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded", cache_dir=tmp_dir ) # This should not raise even if there are two types of sharded weights FlaxBertModel.from_pretrained(path) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_hub(self): # This should not raise even if there are two types of sharded weights # This should discard the safetensors weights in favor of the msgpack sharded weights FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded")
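# Illustrative sketch, not part of the original tests: a standalone safetensors round-trip for a
# randomly initialised FlaxBertModel, reusing the module-level `check_models_equal` helper defined
# above. It assumes flax and safetensors are installed; the config values are arbitrary.
def _demo_flax_safetensors_roundtrip():
    import os
    import tempfile

    from transformers import BertConfig, FlaxBertModel
    from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME

    config = BertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
    )
    model = FlaxBertModel(config)

    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(tmp_dir, safe_serialization=True)
        # Only a model.safetensors file is written, no msgpack checkpoint.
        assert os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))
        assert not os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))
        reloaded = FlaxBertModel.from_pretrained(tmp_dir)

    assert check_models_equal(model, reloaded)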
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_backbone_common.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import tempfile from transformers.testing_utils import require_torch, torch_device from transformers.utils.backbone_utils import BackboneType @require_torch class BackboneTesterMixin: all_model_classes = () has_attentions = True def test_config(self): config_class = self.config_class # test default config config = config_class() self.assertIsNotNone(config) num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers expected_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_stages + 1)] self.assertEqual(config.stage_names, expected_stage_names) self.assertTrue(set(config.out_features).issubset(set(config.stage_names))) # Test out_features and out_indices are correctly set # out_features and out_indices both None config = config_class(out_features=None, out_indices=None) self.assertEqual(config.out_features, [config.stage_names[-1]]) self.assertEqual(config.out_indices, [len(config.stage_names) - 1]) # out_features and out_indices both set config = config_class(out_features=["stem", "stage1"], out_indices=[0, 1]) self.assertEqual(config.out_features, ["stem", "stage1"]) self.assertEqual(config.out_indices, [0, 1]) # Only out_features set config = config_class(out_features=["stage1", "stage3"]) self.assertEqual(config.out_features, ["stage1", "stage3"]) self.assertEqual(config.out_indices, [1, 3]) # Only out_indices set config = config_class(out_indices=[0, 2]) self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]]) self.assertEqual(config.out_indices, [0, 2]) # Error raised when out_indices do not correspond to out_features with self.assertRaises(ValueError): config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2]) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_config_save_pretrained(self): config_class = self.config_class config_first = config_class(out_indices=[0, 1, 2, 3]) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) self.assertEqual(config_second.to_dict(), config_first.to_dict()) def test_channels(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertEqual(len(model.channels), len(config.out_features)) num_features = model.num_features out_indices = [config.stage_names.index(feat) for feat in config.out_features] out_channels = [num_features[idx] for idx in 
out_indices] self.assertListEqual(model.channels, out_channels) new_config = copy.deepcopy(config) new_config.out_features = None model = model_class(new_config) self.assertEqual(len(model.channels), 1) self.assertListEqual(model.channels, [num_features[-1]]) new_config = copy.deepcopy(config) new_config.out_indices = None model = model_class(new_config) self.assertEqual(len(model.channels), 1) self.assertListEqual(model.channels, [num_features[-1]]) def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_features)) self.assertEqual(len(model.channels), len(config.out_features)) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_features = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) def test_backbone_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for backbone_class in self.all_model_classes: backbone = backbone_class(config) self.assertTrue(hasattr(backbone, "backbone_type")) self.assertTrue(hasattr(backbone, "stage_names")) self.assertTrue(hasattr(backbone, "num_features")) self.assertTrue(hasattr(backbone, "out_indices")) self.assertTrue(hasattr(backbone, "out_features")) self.assertTrue(hasattr(backbone, "out_feature_channels")) self.assertTrue(hasattr(backbone, "channels")) self.assertIsInstance(backbone.backbone_type, BackboneType) # Verify num_features has been initialized in the backbone init self.assertIsNotNone(backbone.num_features) self.assertTrue(len(backbone.channels) == len(backbone.out_indices)) self.assertTrue(len(backbone.stage_names) == len(backbone.num_features)) self.assertTrue(len(backbone.channels) <= len(backbone.num_features)) self.assertTrue(len(backbone.out_feature_channels) == len(backbone.stage_names)) def test_backbone_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() batch_size = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: backbone = backbone_class(config) backbone.to(torch_device) backbone.eval() outputs = backbone(**inputs_dict) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps, tuple) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels): self.assertTrue(feature_map.shape[:2], (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # 
Test output_hidden_states=True outputs = backbone(**inputs_dict, output_hidden_states=True) self.assertIsNotNone(outputs.hidden_states) self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names)) for hidden_state, n_channels in zip(outputs.hidden_states, backbone.channels): self.assertEqual(hidden_state.shape[:2], (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: outputs = backbone(**inputs_dict, output_attentions=True) self.assertIsNotNone(outputs.attentions) def test_backbone_stage_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() batch_size = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: config.out_indices = [-2, -1] backbone = backbone_class(config) backbone.to(torch_device) backbone.eval() outputs = backbone(**inputs_dict) # Test number of feature maps returned self.assertIsInstance(outputs.feature_maps, tuple) self.assertEqual(len(outputs.feature_maps), 2) # Order of channels returned is the same as the order of channels when iterating over stage names channels_from_stage_names = [ backbone.out_feature_channels[name] for name in backbone.stage_names if name in backbone.out_features ] self.assertEqual(backbone.channels, channels_from_stage_names) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels): self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
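# Illustrative usage sketch (not part of the original suite; the model and tester names below are
# hypothetical): a concrete backbone test combines this mixin with `unittest.TestCase` and supplies
# `config_class`, `all_model_classes`, and a `model_tester` exposing
# `prepare_config_and_inputs_for_common()`:
#
#     class MyBackboneTest(BackboneTesterMixin, unittest.TestCase):
#         all_model_classes = (MyBackbone,)
#         config_class = MyBackboneConfig
#         has_attentions = False
#
#         def setUp(self):
#             self.model_tester = MyBackboneModelTester(self)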
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_configuration_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class ConfigTester(object): def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs): self.parent = parent self.config_class = config_class self.has_text_modality = has_text_modality self.inputs_dict = kwargs self.common_properties = common_properties def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) common_properties = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"]) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist") # Test that config has the common properties as setter for idx, name in enumerate(common_properties): try: setattr(config, name, idx) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(common_properties): try: config = self.config_class(**{name: idx}) self.parent.assertEqual( getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def create_and_test_config_to_json_string(self): config = self.config_class(**self.inputs_dict) obj = json.loads(config.to_json_string()) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key], value) def create_and_test_config_to_json_file(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "config.json") config_first.to_json_file(json_file_path) config_second = self.config_class.from_json_file(json_file_path) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_from_and_save_pretrained(self): config_first = self.config_class(**self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) with self.parent.assertRaises(OSError): self.config_class.from_pretrained(f".{tmpdirname}") def create_and_test_config_from_and_save_pretrained_subfolder(self): config_first = self.config_class(**self.inputs_dict) subfolder = "test" with tempfile.TemporaryDirectory() as tmpdirname: sub_tmpdirname = os.path.join(tmpdirname, subfolder) config_first.save_pretrained(sub_tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder) self.parent.assertEqual(config_second.to_dict(), config_first.to_dict()) def create_and_test_config_with_num_labels(self): config = self.config_class(**self.inputs_dict, num_labels=5) self.parent.assertEqual(len(config.id2label), 5) self.parent.assertEqual(len(config.label2id), 5) config.num_labels = 3 self.parent.assertEqual(len(config.id2label), 3) self.parent.assertEqual(len(config.label2id), 3) def check_config_can_be_init_without_params(self): if self.config_class.is_composition: with self.parent.assertRaises(ValueError): config = self.config_class() else: config = self.config_class() self.parent.assertIsNotNone(config) def check_config_arguments_init(self): kwargs = copy.deepcopy(config_common_kwargs) config = self.config_class(**kwargs) wrong_values = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.float16: wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16)) elif getattr(config, key) != value: wrong_values.append((key, getattr(config, key), value)) if len(wrong_values) > 0: errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values]) raise ValueError(f"The following keys were not properly set in the config:\n{errors}") def run_common_tests(self): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
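# Illustrative usage sketch (not part of the original suite; `MyModelConfig` is hypothetical):
# a model-specific test case typically builds a ConfigTester in `setUp`, passing the kwargs needed
# to construct its config, and runs the shared checks via `run_common_tests()`:
#
#     class MyModelConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=MyModelConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()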
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_feature_extraction_utils.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") class FeatureExtractorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 _ = Wav2Vec2FeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-feature-extractor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor") except HTTPError: pass def test_push_to_hub(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): 
self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_in_organization(self): feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("valid_org/test-feature-extractor", token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, token=self._token ) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org") for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) def test_push_to_hub_dynamic_feature_extractor(self): CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub("test-dynamic-feature-extractor", token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) new_feature_extractor = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
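# Illustrative sketch (not part of the original suite; the helper name is hypothetical): the
# push/reload round-trip exercised by the tests above reduces to three steps, shown here with the
# classes already imported in this module.
#
#     def _push_and_reload_matches(feature_extractor, repo_id, token):
#         feature_extractor.push_to_hub(repo_id, token=token)            # 1. push to the Hub
#         reloaded = Wav2Vec2FeatureExtractor.from_pretrained(repo_id)   # 2. reload from the Hub
#         # 3. compare attribute by attribute, as the tests above do
#         return all(v == getattr(reloaded, k) for k, v in feature_extractor.__dict__.items())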
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_modeling_flax_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import random import tempfile from typing import List, Tuple import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, is_pt_flax_cross_test, require_flax, torch_device from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): """Function extracts relevant parameters into flatten dict from model params, appends batch normalization statistics if present""" # If Both parameters and batch normalization statistics are present if "batch_stats" in params: # Extract only parameters for the specified head prefix (if specified) and add batch statistics if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) # Only parameters are present else: if from_head_prefix is not None: extracted_params = 
flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) and k != "indices_prng_key" else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): # It might be better to put this inside the for loop below (because we modify the config there). # But logically, it is fine. 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = 
FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with 
tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param 
{name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict 
= True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) # Check that accessing params raises a ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if params can be properly initialized when calling init_weights params = model.init_weights(model.key, model.input_shape) self.assertIsInstance(params, FrozenDict) # Check if all required params are initialized keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) # Check that setting params raises a ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if we can do a forward pass inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): # Check if all required params are loaded keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: # init the model model = model_class(config) # save the model in the temporary directory # load the saved model with _do_init=False with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) # Check that accessing params raises a ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if all required params are loaded _assert_all_params_initialised(model, params) # Check that setting params raises a ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if init_weights initializes missing keys from from_pretrained flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) # Check if all required params are loaded _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = remat_model(**prepared_inputs_dict) # ensure that the dicts of outputs contain the same keys self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() # ensure that the outputs remain precisely equal for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all())
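# Illustrative usage sketch (not part of the original suite; model and tester names are
# hypothetical): Flax model tests subclass this mixin together with `unittest.TestCase` and provide
# a `model_tester` whose `prepare_config_and_inputs_for_common()` returns `(config, inputs_dict)`:
#
#     @require_flax
#     class FlaxMyModelTest(FlaxModelTesterMixin, unittest.TestCase):
#         all_model_classes = (FlaxMyModel, FlaxMyModelForMaskedLM) if is_flax_available() else ()
#
#         def setUp(self):
#             self.model_tester = FlaxMyModelTester(self)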
0
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_modeling_common.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import gc import inspect import os import os.path import pickle import random import re import tempfile import warnings from collections import defaultdict from typing import Dict, List, Tuple import numpy as np from parameterized import parameterized from pytest import mark import transformers from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, PretrainedConfig, PreTrainedModel, is_torch_available, logging, set_seed, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, is_pt_flax_cross_test, is_pt_tf_cross_test, require_accelerate, require_bitsandbytes, require_flash_attn, require_safetensors, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_sdpa, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, is_torch_fx_available, is_torch_sdpa_available, ) from transformers.utils.generic import ContextManagers, ModelOutput if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding from transformers.modeling_utils import load_state_dict, no_init_weights from transformers.pytorch_utils import id_tensor_storage if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp from tests.test_modeling_flax_utils import check_models_equal from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_fx_available(): from transformers.utils.fx import _FX_SUPPORTED_MODELS_WITH_KV_CACHE, symbolic_trace def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or 
"layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): # Use the first letter of the name to get a value and go from a <> -13 to z <> 12 value = ord(name[0].lower()) - 110 param.data.fill_(value) def _mock_all_init_weights(self): # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False is_encoder_decoder = False has_attentions = True model_split_percents = [0.5, 0.7, 0.9] def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // 
self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_keep_in_fp32_modules(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._keep_in_fp32_modules is None: return model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16) for name, param in model.named_parameters(): if any(n in model_class._keep_in_fp32_modules for n in name.split(".")): self.assertTrue(param.dtype == torch.float32) else: self.assertTrue(param.dtype == torch.float16, name) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue # check the keys are in the original state_dict for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) state_dict_saved = safe_load_file(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, 
state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. load_result = model.load_state_dict(state_dict_saved, strict=False) keys_to_ignore = set(model._keys_to_ignore_on_save) if hasattr(model, "_tied_weights_keys"): keys_to_ignore.update(set(model._tied_weights_keys)) self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) def test_save_load_fast_init_from_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(model_class): pass model_class_copy = CopyClass # make sure that all keys are expected for test model_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless model_class_copy._init_weights = _mock_init_weights model_class_copy.init_weights = _mock_all_init_weights model = base_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = model_class_copy.from_pretrained(tmpdirname) model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False) # Before we test anything for key in 
model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item() else: max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_fast_init_context_manager(self): # 1. Create a dummy class. Should have buffers as well? To make sure we test __init__ class MyClass(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config=None): super().__init__(config if config is not None else PretrainedConfig()) self.linear = nn.Linear(10, 10, bias=True) self.embedding = nn.Embedding(10, 10) self.std = 1 def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data = nn.init.kaiming_uniform_(module.weight.data, np.sqrt(5)) if module.bias is not None: module.bias.data.normal_(mean=0.0, std=self.std) # 2. Make sure a linear layer's reset params is properly skipped: with ContextManagers([no_init_weights(True)]): no_init_instance = MyClass() set_seed(0) expected_bias = torch.tensor( ([0.2975, 0.2131, -0.1379, -0.0796, -0.3012, -0.0057, -0.2381, -0.2439, -0.0174, 0.0475]) ) init_instance = MyClass() torch.testing.assert_allclose(init_instance.linear.bias, expected_bias, rtol=1e-3, atol=1e-4) set_seed(0) torch.testing.assert_allclose( init_instance.linear.weight, nn.init.kaiming_uniform_(no_init_instance.linear.weight, np.sqrt(5)) ) # 3. Make sure weights that are not present use init_weight_ and get expected values with tempfile.TemporaryDirectory() as tmpdirname: state_dict = init_instance.state_dict() del state_dict["linear.weight"] init_instance.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) set_seed(0) model_fast_init = MyClass.from_pretrained(tmpdirname) set_seed(0) model_slow_init = MyClass.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): max_diff = torch.max(torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key])) self.assertLessEqual(max_diff.item(), 1e-3, msg=f"{key} not identical") def test_save_load_fast_init_to_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init 
= base_class_copy.from_pretrained(tmpdirname) model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = torch.max( model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key] ).item() else: max_diff = torch.max( torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]) ).item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_torch_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: return base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() def check_equal(loaded): for key in state_dict.keys(): max_diff = torch.max( state_dict[key] ^ loaded[key] if isinstance(state_dict[key], torch.BoolTensor) else torch.abs(state_dict[key] - loaded[key]) ).item() self.assertLessEqual(max_diff, 1e-6, msg=f"{key} not identical") # check that a state dict saved with torch.save can be read back with load_state_dict, with and without the new zipfile serialization with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pytorch_model.bin") torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=True) check_equal(load_state_dict(pt_checkpoint_path)) torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=False) check_equal(load_state_dict(pt_checkpoint_path)) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes:
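# The checks below only compare the leading arguments of `forward`: the full expected list for
# encoder-decoder models, the complete signature for backbone models, and just
# `model.main_input_name` (e.g. "input_ids" or "pixel_values") for everything else.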
model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) elif model_class.__name__ in [*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] and self.has_attentions: expected_arg_names = ["pixel_values", "output_hidden_states", "output_attentions", "return_dict"] self.assertListEqual(arg_names, expected_arg_names) elif model_class.__name__ in [*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] and not self.has_attentions: expected_arg_names = ["pixel_values", "output_hidden_states", "return_dict"] self.assertListEqual(arg_names, expected_arg_names) else: expected_arg_names = [model.main_input_name] self.assertListEqual(arg_names[:1], expected_arg_names) def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: return for model_class in self.all_model_classes: if ( model_class.__name__ in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)] or not model_class.supports_gradient_checkpointing ): continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # unfreeze additional layers for p in model.parameters(): p.requires_grad_(True) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") def test_training(self): if not self.model_tester.is_training: return for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): # Scenario - 1 default behaviour self.check_training_gradient_checkpointing() def test_training_gradient_checkpointing_use_reentrant(self): # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's # torch.utils.checkpoint.checkpoint self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_training_gradient_checkpointing_use_reentrant_false(self): # Scenario - 3 with `use_reentrant=False` pytorch suggests users to use this value for # future releases: https://pytorch.org/docs/stable/checkpoint.html self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def 
test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with 
torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry` def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() # torch 1.8 has no `_clear_class_state` in `torch.jit._state` if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: for attn_implementation in ["eager", "sdpa"]: if attn_implementation == "sdpa" and (not model_class._supports_sdpa or not is_torch_sdpa_available()): continue configs_no_init._attn_implementation = attn_implementation model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: # LayoutLMv2 requires additional inputs input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor model(input_ids, bbox, image) traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif "bbox" in inputs: # Bros requires additional inputs (bbox) input_ids = inputs["input_ids"] bbox = inputs["bbox"] 
model(input_ids, bbox) traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) # when traced model is checked, an error is produced due to name mangling else: main_input = inputs[main_input_name] if model.config._attn_implementation == "sdpa": trace_input = {main_input_name: main_input} if "attention_mask" in inputs: trace_input["attention_mask"] = inputs["attention_mask"] else: self.skipTest("testing SDPA without attention_mask is not supported") model(main_input, attention_mask=inputs["attention_mask"]) # example_kwarg_inputs was introduced in torch==2.0, but it is fine here since SDPA has a requirement on torch>=2.1. traced_model = torch.jit.trace(model, example_kwarg_inputs=trace_input) else: model(main_input) traced_model = torch.jit.trace(model, (main_input,)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_torch_fx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict) def test_torch_fx_output_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: self.skipTest( f"Either torch.fx is not available, or the model type {config.model_type} is not compatible with torch.fx" ) configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") if model.config.model_type in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: input_names.append("past_key_values") # Generally model_tester.prepare_config_and_inputs_for_common seem not to generate past key values inputs. 
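# The dummy cache built below only exists so that tracing covers the `past_key_values` code path:
# one (key, value) pair per layer, each of shape
# (batch_size, num_heads, past_sequence_length=0, head_dim), so no past context is actually added.
# Illustrative shapes (made-up values, not taken from the tester config):
#     batch_size=2, num_heads=4, head_dim=8  ->  cache_shape == (2, 4, 0, 8)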
if "past_key_values" not in inputs: batch_size = inputs[next(iter(inputs))].shape[0] num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads cache_shape = (batch_size, num_heads, 0, head_dim) pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=torch_device), torch.rand(cache_shape, dtype=torch.float, device=torch_device), ) for i in range(model.config.num_hidden_layers) ) inputs["past_key_values"] = pkv filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) with torch.no_grad(): traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_headmasking(self): if not self.test_head_masking: return global_rng.seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() global_rng.seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() # Prepare head_mask # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) head_mask = torch.ones( self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device, ) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) # Test that we can get a gradient back for importance score computation output = sum(t.sum() for t in outputs[0]) output = output.sum() output.backward() multihead_outputs = head_mask.grad self.assertIsNotNone(multihead_outputs) self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( torch.sum(torch.isnan(t)), t.numel() / 4 ) # Check we don't have more than 25% nans (arbitrary) attentions = [ t.masked_fill(torch.isnan(t), 0.0) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_head_pruning(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? 
# self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? 
# self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: return for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) heads_to_prune = {0: [0], 1: [1, 2]} model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2]}) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True 
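# As above, `check_hidden_states_output` verifies both the number of returned hidden states
# (num_hidden_layers + 1, the extra entry being the embedding output unless the tester overrides
# `expected_num_hidden_layers`) and the (sequence_length, hidden_size) shape of the first one,
# this time with the flag taken from the config rather than from the per-call inputs.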
check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def test_feed_forward_chunking(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: torch.manual_seed(0) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model.eval() hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] torch.manual_seed(0) config.chunk_size_feed_forward = 1 model = model_class(config) model.to(torch_device) model.eval() hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0] self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3)) def test_resize_position_vector_embeddings(self): if not self.test_resize_position_embeddings: return ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() max_position_embeddings = config.max_position_embeddings # Retrieve the embeddings and clone theme if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() encoder_cloned_embeddings = encoder_model_embed.weight.clone() decoder_cloned_embeddings = decoder_model_embed.weight.clone() else: model_embed = model.get_position_embeddings() cloned_embeddings = model_embed.weight.clone() # Check that resizing the position embeddings with a larger max_position_embeddings increases # the model's postion embeddings size model.resize_position_embeddings(max_position_embeddings + 10) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10) # Check that it actually resizes the embeddings matrix if 
model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the position embeddings with a smaller max_position_embeddings decreases # the model's max_position_embeddings model.resize_position_embeddings(max_position_embeddings - 5) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True if model.config.is_encoder_decoder: for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False else: for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do 
a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.vocab_size) self.assertTrue(model.config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: 
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_correct_missing_keys(self): if not self.test_missing_keys: return config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)} extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)}) # Some models define this as None if model._keys_to_ignore_on_load_missing: for key in model._keys_to_ignore_on_load_missing: extra_params.pop(key, None) if not extra_params: # In that case, we *are* on a head model, but every # single key is not actual parameters and this is # tested in `test_tied_model_weights_key_ignore` test. continue with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__) def test_tie_model_weights(self): if not self.test_torchscript: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. 
# embeddings.weight.data.div_(2) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # # Check that after modification, they remain the same. # decoding.weight.data.div_(4) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(embeddings.weight.shape, decoding.weight.shape) # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. model_tied.resize_token_embeddings(config.vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) # decoding.weight.data.mul_(20) # # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) @require_safetensors def test_can_use_safetensors(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model_tied = model_class(config) with tempfile.TemporaryDirectory() as d: try: model_tied.save_pretrained(d, safe_serialization=True) except Exception as e: raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}") model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model_tied.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) # Checking the tensor sharing are correct ptrs = defaultdict(list) for k, v in model_tied.state_dict().items(): ptrs[v.data_ptr()].append(k) shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1} for _, shared_names in shared_ptrs.items(): reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names} self.assertEqual( len(reloaded_ptrs), 1, f"The shared pointers are incorrect, found different pointers for keys {shared_names}", ) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] # Detect we get a hit for each key for key in tied_weight_keys: if not any(re.search(key, p) for group in tied_params for p in group): raise ValueError(f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) def test_model_weights_reload_no_missing_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # We are nuking ALL weights on file, so every parameter should # yell on load. We're going to detect if we yell too much, or too little. placeholder_dict = {"tensor": torch.tensor([1, 2])} safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"}) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) prefix = f"{model_reloaded.base_model_prefix}." params = dict(model_reloaded.named_parameters()) params.update(dict(model_reloaded.named_buffers())) param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()} missing_keys = set(infos["missing_keys"]) extra_missing = missing_keys - param_names # Remove tied weights from extra missing: they are normally not warned as missing if their tied # counterpart is present but here there are no weights at all so we do get the warning. ptrs = collections.defaultdict(list) for name, tensor in model_reloaded.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] for group in tied_params: group = {k[len(prefix) :] if k.startswith(prefix) else k for k in group} # We remove the group from extra_missing if not all weights from group are in it if len(group - extra_missing) > 0: extra_missing = extra_missing - set(group) self.assertEqual( extra_missing, set(), f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. " f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys # Remove nonpersistent buffers from missed_missing buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} nonpersistent_buffers = { k[len(prefix) :] if k.startswith(prefix) else k for k in nonpersistent_buffers } missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set(model_reloaded._keys_to_ignore_on_load_missing) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. 
If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = torch.cat( # [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]], # dim=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "FlaubertWithLMHeadModel", "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("GPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs # Copied from tests.test_modeling_tf_common.TFModelTesterMixin.check_pt_tf_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
        if isinstance(tf_outputs, ModelOutput):
            self.assertTrue(
                isinstance(pt_outputs, ModelOutput),
                f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is",
            )

            # Don't copy this block to model specific test file!
            # TODO: remove this method and this line after issues are fixed
            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)

            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
            pt_keys = [k for k, v in pt_outputs.items() if v is not None]

            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")

            # convert to the case of `tuple`
            # appending each key to the current (string) `name`
            attributes = tuple([f"{name}.{k}" for k in tf_keys])
            self.check_pt_tf_outputs(
                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
            )

        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
        elif type(tf_outputs) in [tuple, list]:
            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")

            if attributes is not None:
                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
                self.assertEqual(
                    len(attributes),
                    len(tf_outputs),
                    f"{name}: The tuple `attributes` should have the same length as `tf_outputs`",
                )
            else:
                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])

            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)

        elif isinstance(tf_outputs, tf.Tensor):
            self.assertTrue(
                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
            )

            tf_outputs = tf_outputs.numpy()
            pt_outputs = pt_outputs.detach().to("cpu").numpy()

            self.assertEqual(
                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
            )

            # deal with NumPy's scalars to make replacing nan values by 0 work.
            if np.isscalar(tf_outputs):
                tf_outputs = np.array([tf_outputs])
                pt_outputs = np.array([pt_outputs])

            tf_nans = np.isnan(tf_outputs)
            pt_nans = np.isnan(pt_outputs)

            pt_outputs[tf_nans] = 0
            tf_outputs[tf_nans] = 0
            pt_outputs[pt_nans] = 0
            tf_outputs[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
            self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).")
        else:
            raise ValueError(
                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
                f" {type(tf_outputs)} instead."
) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, tensor in pt_inputs_dict.items(): # skip key that does not exist in tf if isinstance(tensor, bool): tf_inputs_dict[key] = tensor elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) # other general float inputs elif tensor.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): # transformers does not have this model in TF version yet return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. 
self._make_attention_mask_non_null(inputs_dict) tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) tf_model = tf_model_class(config) pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False, ) # make sure only tf inputs are forward that actually exist in function args tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys()) # remove all head masks tf_input_keys.discard("head_mask") tf_input_keys.discard("cross_attn_head_mask") tf_input_keys.discard("decoder_head_mask") pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys} pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys} # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): pt_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions # Here requires `tf_inputs_dict` to build `tf_model` tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. 
""" self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." 
) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. 
        blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))

    @require_torch_multi_gpu
    def test_model_parallelization(self):
        if not self.test_model_parallel:
            return

        # a candidate for testing_utils
        def get_current_gpu_memory_use():
            """returns a list of cuda memory allocations per GPU in MBs"""

            per_device_memory = []
            for id in range(torch.cuda.device_count()):
                with torch.cuda.device(id):
                    per_device_memory.append(torch.cuda.memory_allocated() >> 20)

            return per_device_memory

        # Needs a large model to see the difference.
        config = self.model_tester.get_large_model_config()

        for model_class in self.all_parallelizable_model_classes:
            torch.cuda.empty_cache()

            # 1. single gpu memory load + unload + memory measurements
            # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests)
            memory_at_start = get_current_gpu_memory_use()

            # Put model on device 0 and take a memory snapshot
            model = model_class(config)
            model.to("cuda:0")
            memory_after_model_load = get_current_gpu_memory_use()

            # The memory use on device 0 should be higher than it was initially.
            self.assertGreater(memory_after_model_load[0], memory_at_start[0])

            del model
            gc.collect()
            torch.cuda.empty_cache()

            # 2. MP test
            # it's essential to re-calibrate the usage before the next stage
            memory_at_start = get_current_gpu_memory_use()

            # Spread model layers over multiple devices
            model = model_class(config)
            model.parallelize()
            memory_after_parallelization = get_current_gpu_memory_use()

            # Assert that the memory use on all devices is higher than it was when loaded only on CPU
            for n in range(len(model.device_map.keys())):
                self.assertGreater(memory_after_parallelization[n], memory_at_start[n])

            # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it
            self.assertLess(memory_after_parallelization[0], memory_after_model_load[0])

            # Assert that the memory use of device 1 is higher than it was when the entire model was loaded
            # on device 0 and device 1 wasn't used at all
            self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1])

            del model
            gc.collect()
            torch.cuda.empty_cache()

    @require_torch_multi_gpu
    def test_model_parallel_equal_results(self):
        if not self.test_model_parallel:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_parallelizable_model_classes:
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)

            def cast_to_device(dictionary, device):
                output = {}
                for k, v in dictionary.items():
                    if isinstance(v, torch.Tensor):
                        output[k] = v.to(device)
                    else:
                        output[k] = v

                return output

            model = model_class(config)
            output = model(**cast_to_device(inputs_dict, "cpu"))

            model.parallelize()

            parallel_output = model(**cast_to_device(inputs_dict, "cuda:0"))

            for value, parallel_value in zip(output, parallel_output):
                if isinstance(value, torch.Tensor):
                    self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7))
                elif isinstance(value, (Tuple, List)):
                    for value_, parallel_value_ in zip(value, parallel_value):
self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) else: self.assertEqual(param.device, torch.device(param_device)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_bin(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} # This errors out cause it's missing an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_safetensors(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} # This doesn't error out as it's in safetensors and doesn't need an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = 
compute_module_sizes(model)[""]
            # We test several splits of sizes to make sure it works.
            max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(tmp_dir)

                for max_size in max_gpu_sizes:
                    max_memory = {0: max_size, "cpu": model_size * 2}
                    new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
                    # Making sure part of the model will actually end up offloaded
                    self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"})

                    self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                    torch.manual_seed(0)
                    new_output = new_model(**inputs_dict_class)

                    self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_multi_gpu
    def test_model_parallelism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class._no_split_modules is None:
                continue

            inputs_dict_class = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config).eval()
            model = model.to(torch_device)

            torch.manual_seed(0)
            base_output = model(**inputs_dict_class)

            model_size = compute_module_sizes(model)[""]
            # We test several splits of sizes to make sure it works.
            max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(tmp_dir)

                for max_size in max_gpu_sizes:
                    max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2}
                    new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
                    # Making sure part of the model will actually end up offloaded
                    self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1})

                    self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                    torch.manual_seed(0)
                    new_output = new_model(**inputs_dict_class)

                    self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if model_class.__name__ not in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
            ]:
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(RuntimeError): new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = AutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) new_model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = AutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) new_model_without_prefix.to(torch_device) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_mismatched_shapes_have_properly_initialized_weights(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(configs_no_init) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = AutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) for name, param in new_model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist(self): # 1. Create a dummy class. Should have buffers as well? 
To make sure we test __init__ class MyClass(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config=None): super().__init__(config if config is not None else PretrainedConfig()) self.linear = nn.Linear(10, config.num_labels, bias=True) self.embedding = nn.Embedding(10, 10) self.std = 1 def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data = nn.init.kaiming_uniform_(module.weight.data, np.sqrt(5)) if module.bias is not None: module.bias.data = module.bias.data.normal_(mean=0.0, std=self.std) # Used to make sure the weights with matched shape are loaded correctly config = PretrainedConfig() config.num_labels = 3 model = MyClass(config=config) # Used to make sure the weights with mismatched shape are properly initialized set_seed(0) config = PretrainedConfig() config.num_labels = 4 # not to init. the weights during the creation: to match the logic in `from_pretrained`, so we can keep the # same sequence of random ops in the execution path to allow us to compare `target_model` and `new_model` below # for `linear` part. with ContextManagers([no_init_weights(True)]): target_model = MyClass(config=config) target_model.apply(target_model._initialize_weights) with tempfile.TemporaryDirectory() as tmpdirname: state_dict = model.state_dict() del state_dict["linear.weight"] model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) set_seed(0) new_model = MyClass.from_pretrained(tmpdirname, num_labels=4, ignore_mismatched_sizes=True) for key in new_model.state_dict().keys(): # check weight values for weights with matched shapes are identical # (i.e. correctly loaded from the checkpoint) if key not in ["linear.weight", "linear.bias"]: max_diff = torch.max(torch.abs(model.state_dict()[key] - new_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `model` are not identical", ) else: # check we have some mismatched shapes self.assertNotEqual( model.state_dict()[key].shape, new_model.state_dict()[key].shape, msg=f"the weight shapes for {key} in `model` and `new_model` should differ", ) # check the weights with mismatched shape are properly initialized max_diff = torch.max(torch.abs(new_model.state_dict()[key] - target_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `target_model` are not identical", ) def test_model_is_small(self): # Just a consistency check to make sure we are not running tests on 80M parameter models. config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) num_params = model.num_parameters() assert ( num_params < 1000000 ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max." 
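    # The tests below compare the Flash Attention 2 code path against the default attention implementation.
    # They need the flash-attn package and a CUDA GPU, and they skip model classes that do not declare
    # support via `_supports_flash_attn_2`.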
@require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_conversion(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2" ).to(torch_device) for _, module in model.named_modules(): if "FlashAttention" in module.__class__.__name__: return self.assertTrue(False, "FlashAttention2 modules not found in model") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout 
model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_padding_right(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_left_padding(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input 
= dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # make sure we do left padding dummy_attention_mask[:, :-1] = 0 dummy_attention_mask[:, -1:] = 1 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) out_fa = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.allclose(out, out_fa)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_padding_right(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # make sure we do right padding dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) out_fa = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.allclose(out, out_fa)) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow def test_eager_matches_sdpa_inference(self, torch_dtype: str): if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_device == "cpu" and torch_dtype == "float16": self.skipTest("float16 not supported on cpu") # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. 
if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 1e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 1e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving 8 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for batch_size in [1, 5]: dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) dummy_input = dummy_input[:batch_size] if dummy_input.shape[0] != batch_size: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: extension = torch.rand( batch_size - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: extension = torch.randint( high=5, size=(batch_size - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = 
inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] else: seqlen = dummy_input.shape[-1] dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :-1] = 1 dummy_attention_mask[-1, -4:] = 0 elif padding_side == "right": dummy_attention_mask[-1, 1:] = 1 dummy_attention_mask[-1, :3] = 0 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, batch_size={batch_size}, enable_kernels={enable_kernels}" if is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:batch_size] if decoder_input_ids.shape[0] != batch_size: extension = torch.ones( batch_size - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } else: other_inputs = { "output_hidden_states": True, } # Otherwise fails for e.g. WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: other_inputs["attention_mask"] = dummy_attention_mask # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with torch.backends.cuda.sdp_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): outputs_eager = model_eager(dummy_input, **other_inputs) outputs_sdpa = model_sdpa(dummy_input, **other_inputs) logits_eager = ( outputs_eager.hidden_states[-1] if not is_encoder_decoder else outputs_eager.decoder_hidden_states[-1] ) logits_sdpa = ( outputs_sdpa.hidden_states[-1] if not is_encoder_decoder else outputs_sdpa.decoder_hidden_states[-1] ) if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
if use_mask: if padding_side == "left": sub_sdpa = logits_sdpa[:-1] sub_eager = logits_eager[:-1] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) sub_sdpa = logits_sdpa[-1, :-4] sub_eager = logits_eager[-1, :-4] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) # Testing the padding tokens is not really meaningful but anyway # sub_sdpa = logits_sdpa[-1, -4:] # sub_eager = logits_eager[-1, -4:] # if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): # fail_cases.append(get_mean_reldiff(failcase, sub_sdpa, sub_eager, 4e-2, 4e-2)) elif padding_side == "right": sub_sdpa = logits_sdpa[:-1] sub_eager = logits_eager[:-1] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) sub_sdpa = logits_sdpa[-1, 3:] sub_eager = logits_eager[-1, 3:] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) # Testing the padding tokens is not really meaningful but anyway # sub_sdpa = logits_sdpa[-1, :3] # sub_eager = logits_eager[-1, :3] # if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): # fail_cases.append(get_mean_reldiff(failcase, sub_sdpa, sub_eager, 4e-2, 4e-2)) else: if not torch.allclose(logits_sdpa, logits_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large 
cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): max_new_tokens = 30 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @require_bitsandbytes @mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) if model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, load_in_4bit=True, ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if model.config.is_encoder_decoder: _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) # with attention mask _ = model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @is_pt_tf_cross_test def test_tf_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): # transformers does not have this model in TF version yet return tf_model_class = 
getattr(transformers, tf_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) # Check models are equal for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_flax_cross_test def test_flax_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() flax_model_class_name = "Flax" + model_class.__name__ # Add the "Flax at the beginning if not hasattr(transformers, flax_model_class_name): # transformers does not have this model in Flax version yet return flax_model_class = getattr(transformers, flax_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) # Check models are equal self.assertTrue(check_models_equal(flax_model_1, flax_model_2)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_from_config(self): for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, _ = self.model_tester.prepare_config_and_inputs_for_common() # TODO: to change it in the future with other relevant auto classes fa2_model = AutoModelForCausalLM.from_config( config, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16 ).to(torch_device) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) fa2_correctly_converted = False for _, module in fa2_model.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertTrue(fa2_correctly_converted) _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask) with tempfile.TemporaryDirectory() as tmpdirname: fa2_model.save_pretrained(tmpdirname) model_from_pretrained = AutoModelForCausalLM.from_pretrained(tmpdirname) self.assertTrue(model_from_pretrained.config._attn_implementation != "flash_attention_2") fa2_correctly_converted = False for _, module in model_from_pretrained.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertFalse(fa2_correctly_converted) global_rng = random.Random() def ids_tensor(shape, vocab_size, rng=None, name=None): # Creates a random int32 tensor of the shape within the vocab size if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() def random_attention_mask(shape, rng=None, name=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None) # make sure that at least one token is attended to for each batch # we choose the 1st token so this property of `at 
least one being non-zero` still holds after applying causal mask attn_mask[:, 0] = 1 return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
0
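# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the SDPA-vs-eager
# comparisons in `test_eager_matches_sdpa_inference` ultimately check that
# torch.nn.functional.scaled_dot_product_attention agrees with a plain
# softmax(QK^T / sqrt(d)) V reference within a dtype-dependent tolerance.
# The shapes and tolerances below are arbitrary assumptions for illustration,
# not values taken from the test suite.
import math

import torch
import torch.nn.functional as F


def eager_attention_reference(query, key, value):
    # Reference "eager" scaled dot-product attention (no mask, no dropout).
    scores = query @ key.transpose(-2, -1) / math.sqrt(query.shape[-1])
    return torch.softmax(scores, dim=-1) @ value


def sdpa_matches_eager(batch=2, heads=4, seq=8, head_dim=16, atol=1e-6, rtol=1e-4):
    query = torch.rand(batch, heads, seq, head_dim)
    key = torch.rand(batch, heads, seq, head_dim)
    value = torch.rand(batch, heads, seq, head_dim)
    out_sdpa = F.scaled_dot_product_attention(query, key, value)
    out_eager = eager_attention_reference(query, key, value)
    return torch.allclose(out_sdpa, out_eager, atol=atol, rtol=rtol)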
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_feature_extraction_common.py
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())

        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
0
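# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the file above): the save/load
# round trip that `FeatureExtractionSavingTestMixin` exercises, written out
# for one concrete class. Wav2Vec2FeatureExtractor is an arbitrary choice
# here; any FeatureExtractionMixin subclass follows the same pattern.
import tempfile

from transformers import Wav2Vec2FeatureExtractor


def feature_extractor_roundtrip():
    feat_extract_first = Wav2Vec2FeatureExtractor()  # default constructor arguments
    with tempfile.TemporaryDirectory() as tmpdirname:
        feat_extract_first.save_pretrained(tmpdirname)
        feat_extract_second = Wav2Vec2FeatureExtractor.from_pretrained(tmpdirname)
    # The serialized attributes survive the round trip unchanged.
    assert feat_extract_second.to_dict() == feat_extract_first.to_dict()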
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_image_transforms.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image 
= to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_tf def test_to_pil_image_from_tensorflow(self): # channels_first image = tf.random.uniform((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels_last image = tf.random.uniform((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_flax def test_to_pil_image_from_jax(self): key = jax.random.PRNGKey(0) # channel first image = jax.random.uniform(key, (3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channel last image = jax.random.uniform(key, (4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) # Can pass in input_data_format and works if data format is ambiguous or unknown. image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. 
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test output size = (int(size * height / width), size) if size is an int and height > width and # input has 4 channels image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) # Check that an image with 4 channels is resized correctly image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = 
np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with self.assertRaises(ValueError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) # Test float32 image input keeps float32 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test float16 image input keeps float16 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) # The mean and std are cast to match the dtype of the input image cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test int image input is converted to float32 image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) 
self.assertTrue(np.allclose(cropped_image, expected_image)) # Test image with 4 channels is cropped correctly image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the left and right of each axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 
5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) # Test we can pad on an image with 2 channels # fmt: off image = np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) # Can flip when the image has 2 channels # 
fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) # fmt: on flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
0
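# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): the box-format conversion
# checked by `test_center_to_corners_format` is plain arithmetic,
# (cx, cy, w, h) -> (x0, y0, x1, y1) with x0 = cx - w/2 and x1 = cx + w/2.
# A minimal NumPy version, independent of transformers.image_transforms:
import numpy as np


def center_to_corners(boxes):
    # boxes: (..., 4) array of (center_x, center_y, width, height)
    cx, cy, w, h = boxes[..., 0], boxes[..., 1], boxes[..., 2], boxes[..., 3]
    return np.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], axis=-1)


# Reproduces the expected values asserted in the test above:
bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]])
expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]])
assert np.allclose(center_to_corners(bbox_center), expected)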
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_pipeline_mixin.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import random import unittest from pathlib import Path from transformers.testing_utils import ( is_pipeline_test, require_decord, require_pytesseract, require_timm, require_torch, require_torch_or_tf, require_vision, ) from transformers.utils import direct_transformers_import, logging from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests from .pipelines.test_pipelines_conversational import ConversationalPipelineTests from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests from .pipelines.test_pipelines_question_answering import QAPipelineTests from .pipelines.test_pipelines_summarization import SummarizationPipelineTests from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests from .pipelines.test_pipelines_text_to_audio import TextToAudioPipelineTests from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests from .pipelines.test_pipelines_translation import TranslationPipelineTests from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests pipeline_test_mapping = { "audio-classification": {"test": AudioClassificationPipelineTests}, "automatic-speech-recognition": {"test": 
AutomaticSpeechRecognitionPipelineTests}, "conversational": {"test": ConversationalPipelineTests}, "depth-estimation": {"test": DepthEstimationPipelineTests}, "document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests}, "feature-extraction": {"test": FeatureExtractionPipelineTests}, "fill-mask": {"test": FillMaskPipelineTests}, "image-classification": {"test": ImageClassificationPipelineTests}, "image-segmentation": {"test": ImageSegmentationPipelineTests}, "image-to-image": {"test": ImageToImagePipelineTests}, "image-to-text": {"test": ImageToTextPipelineTests}, "mask-generation": {"test": MaskGenerationPipelineTests}, "object-detection": {"test": ObjectDetectionPipelineTests}, "question-answering": {"test": QAPipelineTests}, "summarization": {"test": SummarizationPipelineTests}, "table-question-answering": {"test": TQAPipelineTests}, "text2text-generation": {"test": Text2TextGenerationPipelineTests}, "text-classification": {"test": TextClassificationPipelineTests}, "text-generation": {"test": TextGenerationPipelineTests}, "text-to-audio": {"test": TextToAudioPipelineTests}, "token-classification": {"test": TokenClassificationPipelineTests}, "translation": {"test": TranslationPipelineTests}, "video-classification": {"test": VideoClassificationPipelineTests}, "visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests}, "zero-shot": {"test": ZeroShotClassificationPipelineTests}, "zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests}, "zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests}, "zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests}, } for task, task_info in pipeline_test_mapping.items(): test = task_info["test"] task_info["mapping"] = { "pt": getattr(test, "model_mapping", None), "tf": getattr(test, "tf_model_mapping", None), } # The default value `hf-internal-testing` is for running the pipeline testing against the tiny models on the Hub. # For debugging purpose, we can specify a local path which is the `output_path` argument of a previous run of # `utils/create_dummy_models.py`. TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing") if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing": TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json") else: TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json") with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp: tiny_model_summary = json.load(fp) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) logger = logging.get_logger(__name__) class PipelineTesterMixin: model_tester = None pipeline_model_mapping = None supported_frameworks = ["pt", "tf"] def run_task_tests(self, task): """Run pipeline tests for a specific `task` Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. """ if task not in self.pipeline_model_mapping: self.skipTest( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: `{task}` is not in " f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`." 
) model_architectures = self.pipeline_model_mapping[task] if not isinstance(model_architectures, tuple): model_architectures = (model_architectures,) if not isinstance(model_architectures, tuple): raise ValueError(f"`model_architectures` must be a tuple. Got {type(model_architectures)} instead.") for model_architecture in model_architectures: model_arch_name = model_architecture.__name__ # Get the canonical name for _prefix in ["Flax", "TF"]: if model_arch_name.startswith(_prefix): model_arch_name = model_arch_name[len(_prefix) :] break tokenizer_names = [] processor_names = [] commit = None if model_arch_name in tiny_model_summary: tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"] processor_names = tiny_model_summary[model_arch_name]["processor_classes"] if "sha" in tiny_model_summary[model_arch_name]: commit = tiny_model_summary[model_arch_name]["sha"] # Adding `None` (if empty) so we can generate tests tokenizer_names = [None] if len(tokenizer_names) == 0 else tokenizer_names processor_names = [None] if len(processor_names) == 0 else processor_names repo_name = f"tiny-random-{model_arch_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": repo_name = model_arch_name self.run_model_pipeline_tests( task, repo_name, model_architecture, tokenizer_names, processor_names, commit ) def run_model_pipeline_tests(self, task, repo_name, model_architecture, tokenizer_names, processor_names, commit): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class names Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_names (`List[str]`): A list of names of a subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. processor_names (`List[str]`): A list of names of subclasses of `BaseImageProcessor` or `FeatureExtractionMixin`. """ # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ for tokenizer_name in tokenizer_names: for processor_name in processor_names: if self.is_pipeline_test_to_skip( pipeline_test_class_name, model_architecture.config_class, model_architecture, tokenizer_name, processor_name, ): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) continue self.run_pipeline_test(task, repo_name, model_architecture, tokenizer_name, processor_name, commit) def run_pipeline_test(self, task, repo_name, model_architecture, tokenizer_name, processor_name, commit): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class name The model will be loaded from a model repository on the Hub. Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_name (`str`): The name of a subclass of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. processor_name (`str`): The name of a subclass of `BaseImageProcessor` or `FeatureExtractionMixin`. 
""" repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": model_type = model_architecture.config_class.model_type repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name) tokenizer = None if tokenizer_name is not None: tokenizer_class = getattr(transformers_module, tokenizer_name) tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit) processor = None if processor_name is not None: processor_class = getattr(transformers_module, processor_name) # If the required packages (like `Pillow` or `torchaudio`) are not installed, this will fail. try: processor = processor_class.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not load the " f"processor from `{repo_id}` with `{processor_name}`." ) return # TODO: Maybe not upload such problematic tiny models to Hub. if tokenizer is None and processor is None: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"any tokenizer / processor from `{repo_id}`." ) return # TODO: We should check if a model file is on the Hub repo. instead. try: model = model_architecture.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load " f"the model from `{repo_id}` with `{model_architecture}`." ) return pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, processor): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | processor `{processor_name}`." ) return # validate validate_test_components(self, task, model, tokenizer, processor) if hasattr(model, "eval"): model = model.eval() # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. task_test = pipeline_test_mapping[task]["test"]() pipeline, examples = task_test.get_test_pipeline(model, tokenizer, processor) if pipeline is None: # The test can disable itself, but it should be very marginal # Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer don't exist) logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not get the " "pipeline for testing." 
) return task_test.run_pipeline_test(pipeline, examples) def run_batch_test(pipeline, examples): # Need to copy because `Conversation` are stateful if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None: return # No batching for this and it's OK # 10 examples with batch size 4 means there needs to be a unfinished batch # which is important for the unbatcher def data(n): for _ in range(n): # Need to copy because Conversation object is mutated yield copy.deepcopy(random.choice(examples)) out = [] if task == "conversational": for item in pipeline(data(10), batch_size=4, max_new_tokens=5): out.append(item) else: for item in pipeline(data(10), batch_size=4): out.append(item) self.assertEqual(len(out), 10) run_batch_test(pipeline, examples) @is_pipeline_test def test_pipeline_audio_classification(self): self.run_task_tests(task="audio-classification") @is_pipeline_test def test_pipeline_automatic_speech_recognition(self): self.run_task_tests(task="automatic-speech-recognition") @is_pipeline_test def test_pipeline_conversational(self): self.run_task_tests(task="conversational") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_depth_estimation(self): self.run_task_tests(task="depth-estimation") @is_pipeline_test @require_pytesseract @require_torch @require_vision def test_pipeline_document_question_answering(self): self.run_task_tests(task="document-question-answering") @is_pipeline_test def test_pipeline_feature_extraction(self): self.run_task_tests(task="feature-extraction") @is_pipeline_test def test_pipeline_fill_mask(self): self.run_task_tests(task="fill-mask") @is_pipeline_test @require_torch_or_tf @require_vision def test_pipeline_image_classification(self): self.run_task_tests(task="image-classification") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_image_segmentation(self): self.run_task_tests(task="image-segmentation") @is_pipeline_test @require_vision def test_pipeline_image_to_text(self): self.run_task_tests(task="image-to-text") @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision @require_torch def test_pipeline_mask_generation(self): self.run_task_tests(task="mask-generation") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_object_detection(self): self.run_task_tests(task="object-detection") @is_pipeline_test def test_pipeline_question_answering(self): self.run_task_tests(task="question-answering") @is_pipeline_test def test_pipeline_summarization(self): self.run_task_tests(task="summarization") @is_pipeline_test def test_pipeline_table_question_answering(self): self.run_task_tests(task="table-question-answering") @is_pipeline_test def test_pipeline_text2text_generation(self): self.run_task_tests(task="text2text-generation") @is_pipeline_test def test_pipeline_text_classification(self): self.run_task_tests(task="text-classification") @is_pipeline_test @require_torch_or_tf def test_pipeline_text_generation(self): self.run_task_tests(task="text-generation") @is_pipeline_test @require_torch def test_pipeline_text_to_audio(self): self.run_task_tests(task="text-to-audio") @is_pipeline_test def test_pipeline_token_classification(self): self.run_task_tests(task="token-classification") @is_pipeline_test def test_pipeline_translation(self): self.run_task_tests(task="translation") @is_pipeline_test @require_torch_or_tf @require_vision @require_decord def test_pipeline_video_classification(self): 
self.run_task_tests(task="video-classification") @is_pipeline_test @require_torch @require_vision def test_pipeline_visual_question_answering(self): self.run_task_tests(task="visual-question-answering") @is_pipeline_test def test_pipeline_zero_shot(self): self.run_task_tests(task="zero-shot") @is_pipeline_test @require_torch def test_pipeline_zero_shot_audio_classification(self): self.run_task_tests(task="zero-shot-audio-classification") @is_pipeline_test @require_vision def test_pipeline_zero_shot_image_classification(self): self.run_task_tests(task="zero-shot-image-classification") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_object_detection(self): self.run_task_tests(task="zero-shot-object-detection") # This contains the test cases to be skipped without model architecture being involved. def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): """Skip some tests based on the classes or their names without the instantiated objects. This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail. """ # No fix is required for this case. if ( pipeline_test_casse_name == "DocumentQuestionAnsweringPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer. return True return False def is_pipeline_test_to_skip_more(self, pipeline_test_casse_name, config, model, tokenizer, processor): # noqa """Skip some more tests based on the information from the instantiated objects.""" # No fix is required for this case. if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer is not None and getattr(tokenizer, "pad_token", None) is None and not tokenizer.__class__.__name__.endswith("Fast") ): # `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token. return True return False def validate_test_components(test_case, task, model, tokenizer, processor): # TODO: Move this to tiny model creation script # head-specific (within a model type) necessary changes to the config # 1. for `BlenderbotForCausalLM` if model.__class__.__name__ == "BlenderbotForCausalLM": model.config.encoder_no_repeat_ngram_size = 0 # TODO: Change the tiny model creation script: don't create models with problematic tokenizers # Avoid `IndexError` in embedding layers CONFIG_WITHOUT_VOCAB_SIZE = ["CanineConfig"] if tokenizer is not None: config_vocab_size = getattr(model.config, "vocab_size", None) # For CLIP-like models if config_vocab_size is None: if hasattr(model.config, "text_config"): config_vocab_size = getattr(model.config.text_config, "vocab_size", None) elif hasattr(model.config, "text_encoder"): config_vocab_size = getattr(model.config.text_encoder, "vocab_size", None) if config_vocab_size is None and model.config.__class__.__name__ not in CONFIG_WITHOUT_VOCAB_SIZE: raise ValueError( "Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`." )
0
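As a side note to the batching check in `run_batch_test` above, here is a minimal, hedged sketch of the same pattern outside the test harness: a generator feeds ten examples to a pipeline with `batch_size=4`, so the final batch is deliberately unfinished and the unbatching logic has to cope. The checkpoint name is only an illustrative assumption, not one the mixin pins, and running this requires network access to download it.

from transformers import pipeline

# Illustrative public checkpoint; the mixin resolves tiny per-architecture models from the Hub instead.
classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")

def data(n):
    # 10 examples with batch_size=4 leaves one unfinished batch, which is exactly
    # the situation the pipeline's unbatcher has to handle correctly.
    for _ in range(n):
        yield "This is a test sentence."

outputs = [item for item in classifier(data(10), batch_size=4)]
assert len(outputs) == 10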
hf_public_repos/transformers
hf_public_repos/transformers/tests/test_cache_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import set_seed from transformers.testing_utils import is_torch_available, require_auto_gptq, require_torch, require_torch_gpu, slow if is_torch_available(): import torch from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, LlamaForCausalLM, SinkCache @require_torch class CacheTest(unittest.TestCase): def test_cache_equivalence(self): """Tests that we can convert back and forth between the legacy cache format and DynamicCache""" legacy_cache = () new_cache = DynamicCache() # Creates a new cache with 10 layers in both formats for layer_idx in range(10): new_key = torch.rand((2, 4, 8, 16)) new_value = torch.rand((2, 4, 8, 16)) new_cache.update(new_key, new_value, layer_idx) legacy_cache += ((new_key, new_value),) # Sanity check 1: they must have the same shapes self.assertTrue(len(legacy_cache), len(new_cache)) for layer_idx in range(10): self.assertTrue(len(legacy_cache[layer_idx]), len(legacy_cache[layer_idx])) for key_value_idx in range(2): self.assertTrue( legacy_cache[layer_idx][key_value_idx].shape == new_cache[layer_idx][key_value_idx].shape ) # Sanity check 2: we can get the sequence length in multiple ways with DynamicCache, and they return the # expected value self.assertTrue(legacy_cache[0][0].shape[-2] == new_cache[0][0].shape[-2] == new_cache.get_seq_length() == 8) # Sanity check 3: they must be equal, and both support indexing for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(new_cache[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx]) ) # Test 1: We can convert from legacy to new with no changes from_legacy = DynamicCache.from_legacy_cache(legacy_cache) for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(from_legacy[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx]) ) # Test 2: We can convert from new to legacy with no changes to_legacy = new_cache.to_legacy_cache() for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(to_legacy[layer_idx][key_value_idx], new_cache[layer_idx][key_value_idx]) ) def test_reorder_cache_retrocompatibility(self): """Tests that Cache.reorder_cache is retrocompatible with the legacy code path""" legacy_reorder_fn = LlamaForCausalLM._reorder_cache # An example of a legacy `_reorder_cache` function legacy_cache = () new_cache = DynamicCache() # Creates a new cache with 10 layers in both formats for layer_idx in range(10): new_key = torch.rand((4, 4, 8, 16)) new_value = torch.rand((4, 4, 8, 16)) new_cache.update(new_key, new_value, layer_idx) legacy_cache += ((new_key, new_value),) # Let's create some dummy beam indices. 
From the shape above, it is equivalent to the case where num_beams=4 # and batch_size=1 beam_idx = torch.randint(low=0, high=4, size=(4,)) legacy_cache_reordered = legacy_reorder_fn(legacy_cache, beam_idx) new_cache.reorder_cache(beam_idx) # Let's check that the results are the same for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose( new_cache[layer_idx][key_value_idx], legacy_cache_reordered[layer_idx][key_value_idx] ) ) @require_torch_gpu @slow class CacheIntegrationTest(unittest.TestCase): def test_dynamic_cache_hard(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["Here's everything I know about cats. Cats"], return_tensors="pt").to(model.device) # DynamicCache and the legacy cache format should be equivalent set_seed(0) gen_out_legacy = model.generate(**inputs, do_sample=True, max_new_tokens=256) set_seed(0) gen_out = model.generate(**inputs, do_sample=True, max_new_tokens=256, past_key_values=DynamicCache()) self.assertListEqual(gen_out_legacy.tolist(), gen_out.tolist()) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = ( "Here's everything I know about cats. Cats are mysterious creatures. They can't talk, and they don't like " "to be held. They don't play fetch, and they don't like to be hugged. But they do like to be petted.\n" "Cats are also very independent. They don't like to be told what to do, and they don't like to be told " "what to eat. They are also very territorial. They don't like to share their food or their toys.\nCats " "are also very curious. They like to explore, and they like to play. They are also very fast. They can " "run very fast, and they can jump very high.\nCats are also very smart. They can learn tricks, and they " "can solve problems. They are also very playful. They like to play with toys, and they like to play with " "other cats.\nCats are also very affectionate. They like to be petted, and they like to be held. They " "also like to be scratched.\nCats are also very clean. They like to groom themselves, and they like to " "clean their litter box.\nCats are also very independent. 
They don't" ) self.assertEqual(decoded[0], expected_text) def test_dynamic_cache_batched(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["A sequence: 1, 2, 3, 4, 5", "A sequence: A, B, C"], padding=True, return_tensors="pt").to( model.device ) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10, past_key_values=DynamicCache()) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = ["A sequence: 1, 2, 3, 4, 5, 6, 7, 8,", "A sequence: A, B, C, D, E, F, G, H"] self.assertListEqual(decoded, expected_text) def test_dynamic_cache_beam_search(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["The best color is"], return_tensors="pt").to(model.device) gen_out = model.generate( **inputs, do_sample=False, max_new_tokens=20, num_beams=2, num_return_sequences=2, ) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = [ "The best color is the one that makes you feel good.\nThe best color is the one that makes you feel good", "The best color is the one that suits you.\nThe best color is the one that suits you. The", ] self.assertListEqual(decoded, expected_text) @require_auto_gptq def test_sink_cache_hard(self): tokenizer = AutoTokenizer.from_pretrained("TheBloke/LLaMa-7B-GPTQ") model = AutoModelForCausalLM.from_pretrained("TheBloke/LLaMa-7B-GPTQ", device_map="auto") inputs = tokenizer(["Vaswani et al. (2017) introduced the Transformers"], return_tensors="pt").to(model.device) # Set up the SinkCache. Using a small window length to contain computational complexity. If this example is run # without a SinkCache, the last few tokens are gibberish (ends in "of the of the of a of a of") cache = SinkCache(window_length=508, num_sink_tokens=4) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=3000, past_key_values=cache) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertTrue(decoded[0].endswith("to perform a variety of tasks. The Transformer is a neural network")) def test_sink_cache_iterative_prompts(self): """Tests that SinkCache supports more than one new token at once, when shifting the cache""" tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") model = AutoModelForCausalLM.from_pretrained( "HuggingFaceH4/zephyr-7b-beta", device_map="auto", torch_dtype=torch.float16 ) prompt = ( "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences " "and must-see attractions." 
) # Prepare generation settings cache = SinkCache(window_length=256, num_sink_tokens=4) input_ids = torch.tensor([], device=model.device, dtype=torch.int) for _ in range(3): # Tokenize the prompt with the correct chat template chat = [{"role": "user", "content": prompt}] tokenized_chat = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True).to( model.device ) input_ids = torch.cat((input_ids, tokenized_chat), dim=1) # Perform the generation gen_out = model.generate( input_ids, do_sample=False, max_new_tokens=100, past_key_values=cache, use_cache=True ) input_ids = gen_out # We went well beyond the cache length self.assertTrue(input_ids.shape[1] > cache.get_max_length() * 1.5) # And it still produces a coherent english decoded = tokenizer.batch_decode(input_ids, skip_special_tokens=True) last_output = ( "<|assistant|>\nAs the sun began to set over the Pacific Ocean, I found myself standing on the shores of " "Waikiki Beach, my heart filled with awe and wonder. I had just returned from a two-week journey to the " "beautiful island of Hawaii, and it had been an unforgettable experience filled with cultural experiences " "and must-see attractions that left me breathless.\n\nOne of the most memorable experiences of my trip " "was visiting the historic district of Honolulu. Here," ) self.assertTrue(decoded[0].endswith(last_output))
0
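A minimal sketch of the legacy-tuple to `DynamicCache` round trip exercised by `test_cache_equivalence` above, with illustrative shapes; it assumes only `torch` and the `DynamicCache` helpers the test itself imports.

import torch
from transformers import DynamicCache

# One (key, value) pair per layer in the legacy tuple format
# (illustrative shapes: batch=2, heads=4, seq_len=8, head_dim=16).
legacy_cache = tuple((torch.rand(2, 4, 8, 16), torch.rand(2, 4, 8, 16)) for _ in range(4))

cache = DynamicCache.from_legacy_cache(legacy_cache)
assert cache.get_seq_length() == 8

# Converting back should reproduce the original tensors exactly.
for (key, value), (new_key, new_value) in zip(legacy_cache, cache.to_legacy_cache()):
    assert torch.equal(key, new_key) and torch.equal(value, new_value)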
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_image_question_answering.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-question-answering") self.tool.setup() self.remote_tool = load_tool("image-question-answering", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image, "How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image, "How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image, question="How many cats are sleeping on the couch?") self.assertEqual(result, "2") def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image, question="How many cats are sleeping on the couch?") self.assertEqual(result, "2")
0
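A compact, hedged sketch of the `load_tool` pattern these tool tests share: instantiate a tool by task name, call `setup()` once, then invoke it positionally or with keywords. The image path mirrors the COCO fixture used in the tests above; vision extras (`Pillow`) are assumed to be installed.

from pathlib import Path

from PIL import Image
from transformers import load_tool
from transformers.testing_utils import get_tests_dir

tool = load_tool("image-question-answering")
tool.setup()  # one-time setup before the first call, as the tests above do in setUp

image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
# Positional and keyword calls are equivalent.
answer = tool(image, "How many cats are sleeping on the couch?")
same_answer = tool(image=image, question="How many cats are sleeping on the couch?")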
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_text_to_speech.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("text-to-speech") self.tool.setup() def test_exact_match_arg(self): # SpeechT5 isn't deterministic torch.manual_seed(0) result = self.tool("hey") resulting_tensor = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) ) def test_exact_match_kwarg(self): # SpeechT5 isn't deterministic torch.manual_seed(0) result = self.tool("hey") resulting_tensor = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_agent_types.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def get_new_path(suffix="") -> str: directory = tempfile.mkdtemp() return os.path.join(directory, str(uuid.uuid4()) + suffix) @require_soundfile @require_torch class AgentAudioTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 agent_type = AgentAudio(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(path)) # Ensure that the file contains the same value as the original tensor new_tensor, _ = sf.read(path) self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4)) def test_from_string(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 path = get_new_path(suffix=".wav") sf.write(path, tensor, 16000) agent_type = AgentAudio(path) self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) self.assertEqual(agent_type.to_string(), path) @require_vision @require_torch class AgentImageTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.randint(0, 256, (64, 64, 3)) agent_type = AgentImage(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4)) self.assertIsInstance(agent_type.to_raw(), Image.Image) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) def test_from_string(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(path) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) def test_from_image(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(image) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) class AgentTextTests(unittest.TestCase): def test_from_string(self): string = "Hey!" 
agent_type = AgentText(string) self.assertEqual(string, agent_type.to_string()) self.assertEqual(string, agent_type.to_raw()) self.assertEqual(string, agent_type)
0
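For orientation, a hedged sketch of the `AgentAudio` round trip the tests above verify: wrap a waveform tensor, let the agent type serialize it to a temporary .wav file, and read the samples back. It assumes `torch` and `soundfile` are installed, mirroring the `@require_torch` / `@require_soundfile` guards.

import torch
import soundfile as sf
from transformers.tools.agent_types import AgentAudio

waveform = torch.rand(12, dtype=torch.float64) - 0.5
agent_audio = AgentAudio(waveform)
path = str(agent_audio.to_string())  # writes the tensor to a .wav file and returns its path

# Reading the file back should give (approximately) the original samples.
recovered, _sampling_rate = sf.read(path)
assert torch.allclose(waveform, torch.tensor(recovered), atol=1e-4)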
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_text_question_answering.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin TEXT = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("text-question-answering") self.tool.setup() self.remote_tool = load_tool("text-question-answering", remote=True) def test_exact_match_arg(self): result = self.tool(TEXT, "What did Hugging Face do in April 2021?") self.assertEqual(result, "launched the BigScience Research Workshop") def test_exact_match_arg_remote(self): result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?") self.assertEqual(result, "launched the BigScience Research Workshop") def test_exact_match_kwarg(self): result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?") self.assertEqual(result, "launched the BigScience Research Workshop") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?") self.assertEqual(result, "launched the BigScience Research Workshop")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_tools_common.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image authorized_types = ["text", "image", "audio"] def create_inputs(input_types: List[str]): inputs = [] for input_type in input_types: if input_type == "text": inputs.append("Text input") elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512)) ) elif input_type == "audio": inputs.append(torch.ones(3000)) elif isinstance(input_type, list): inputs.append(create_inputs(input_type)) else: raise ValueError(f"Invalid type requested: {input_type}") return inputs def output_types(outputs: List): output_types = [] for output in outputs: if isinstance(output, (str, AgentText)): output_types.append("text") elif isinstance(output, (Image.Image, AgentImage)): output_types.append("image") elif isinstance(output, (torch.Tensor, AgentAudio)): output_types.append("audio") else: raise ValueError(f"Invalid output: {output}") return output_types @is_tool_test class ToolTesterMixin: def test_inputs_outputs(self): self.assertTrue(hasattr(self.tool, "inputs")) self.assertTrue(hasattr(self.tool, "outputs")) inputs = self.tool.inputs for _input in inputs: if isinstance(_input, list): for __input in _input: self.assertTrue(__input in authorized_types) else: self.assertTrue(_input in authorized_types) outputs = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types) def test_call(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) # There is a single output if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_common_attributes(self): self.assertTrue(hasattr(self.tool, "description")) self.assertTrue(hasattr(self.tool, "default_checkpoint")) self.assertTrue(self.tool.description.startswith("This is a tool that")) def test_agent_types_outputs(self): inputs = create_inputs(self.tool.inputs) outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = create_inputs(self.tool.inputs) _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error outputs = self.tool(*inputs) if not 
isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_image_segmentation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageSegmentationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-segmentation") self.tool.setup() self.remote_tool = load_tool("image-segmentation", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image, "cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image, "cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image, label="cat") self.assertTrue(isinstance(result, Image.Image)) def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image, label="cat") self.assertTrue(isinstance(result, Image.Image))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_text_summarization.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin TEXT = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class TextSummarizationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("summarization") self.tool.setup() self.remote_tool = load_tool("summarization", remote=True) def test_exact_match_arg(self): result = self.tool(TEXT) self.assertEqual( result, "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.", ) def test_exact_match_arg_remote(self): result = self.remote_tool(TEXT) self.assertEqual( result, "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.", ) def test_exact_match_kwarg(self): result = self.tool(text=TEXT) self.assertEqual( result, "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.", ) def test_exact_match_kwarg_remote(self): result = self.remote_tool(text=TEXT) self.assertEqual( result, "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. 
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.", )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_text_classification.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("text-classification") self.tool.setup() self.remote_tool = load_tool("text-classification", remote=True) def test_exact_match_arg(self): result = self.tool("That's quite cool", ["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_arg_remote(self): result = self.remote_tool("That's quite cool", ["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_kwarg(self): result = self.tool(text="That's quite cool", labels=["positive", "negative"]) self.assertEqual(result, "positive") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"]) self.assertEqual(result, "positive")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_document_question_answering.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers import load_tool from .test_tools_common import ToolTesterMixin class DocumentQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("document-question-answering") self.tool.setup() self.remote_tool = load_tool("document-question-answering", remote=True) def test_exact_match_arg(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.tool(document, "When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.") def test_exact_match_arg_remote(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.remote_tool(document, "When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.") def test_exact_match_kwarg(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] self.tool(document=document, question="When is the coffee break?") def test_exact_match_kwarg_remote(self): dataset = load_dataset("hf-internal-testing/example-documents", split="test") document = dataset[0]["image"] result = self.remote_tool(document=document, question="When is the coffee break?") self.assertEqual(result, "11-14 to 11:39 a.m.")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_python_interpreter.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate # Fake function we will use as tool def add_two(x): return x + 2 class PythonInterpreterTester(unittest.TestCase): def test_evaluate_assign(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) code = "x = y" state = {"y": 5} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 5, "y": 5}) def test_evaluate_call(self): code = "y = add_two(x)" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) # Won't work without the tool with CaptureStdout() as out: result = evaluate(code, {}, state=state) assert result is None assert "tried to execute add_two" in out.out def test_evaluate_constant(self): code = "x = 3" state = {} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3}) def test_evaluate_dict(self): code = "test_dict = {'x': x, 'y': add_two(x)}" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertDictEqual(result, {"x": 3, "y": 5}) self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_expression(self): code = "x = 3\ny = 5" state = {} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(state, {"x": 3, "y": 5}) def test_evaluate_f_string(self): code = "text = f'This is x: {x}.'" state = {"x": 3} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."}) def test_evaluate_if(self): code = "if x <= 3:\n y = 2\nelse:\n y = 5" state = {"x": 3} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(state, {"x": 3, "y": 2}) state = {"x": 8} result = evaluate(code, {}, state=state) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(state, {"x": 8, "y": 5}) def test_evaluate_list(self): code = "test_list = [x, add_two(x)]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) self.assertListEqual(result, [3, 5]) self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) def test_evaluate_name(self): code = "y = x" state = {"x": 3} result = evaluate(code, {}, state=state) assert result == 3 self.assertDictEqual(state, {"x": 3, "y": 3}) def test_evaluate_subscript(self): code = "test_list = [x, add_two(x)]\ntest_list[1]" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]}) code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" state = {"x": 3} result = evaluate(code, {"add_two": add_two}, state=state) assert result == 5 self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}}) def test_evaluate_for(self): code = "x = 0\nfor i in range(3):\n x = i" state = {} result = evaluate(code, {"range": range}, state=state) assert result == 2 self.assertDictEqual(state, {"x": 2, "i": 2})
0
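A small sketch of how the restricted interpreter under test is driven: `evaluate` executes a narrow subset of Python against an explicit tool mapping and a mutable `state` dict, and returns the value of the last statement. The code string and values below are illustrative, chosen to match the behaviour the tests above assert.

from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
result = evaluate("y = add_two(x)\ntest_list = [x, y]", {"add_two": add_two}, state=state)

# evaluate returns the value of the last statement and records every binding in `state`.
assert result == [3, 5]
assert state == {"x": 3, "y": 5, "test_list": [3, 5]}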
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_image_captioning.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pathlib import Path from transformers import is_vision_available, load_tool from transformers.testing_utils import get_tests_dir from .test_tools_common import ToolTesterMixin if is_vision_available(): from PIL import Image class ImageCaptioningToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("image-captioning") self.tool.setup() self.remote_tool = load_tool("image-captioning", remote=True) def test_exact_match_arg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_arg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_kwarg(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.tool(image=image) self.assertEqual(result, "two cats sleeping on a couch") def test_exact_match_kwarg_remote(self): image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png") result = self.remote_tool(image=image) self.assertEqual(result, "two cats sleeping on a couch")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_translation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import load_tool from transformers.tools.agent_types import AGENT_TYPE_MAPPING from .test_tools_common import ToolTesterMixin, output_types class TranslationToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("translation") self.tool.setup() self.remote_tool = load_tool("translation", remote=True) def test_exact_match_arg(self): result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_arg_remote(self): result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg(self): result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_exact_match_kwarg_remote(self): result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French") self.assertEqual(result, "- Hé, comment ça va?") def test_call(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) # There is a single output if len(self.tool.outputs) == 1: outputs = [outputs] self.assertListEqual(output_types(outputs), self.tool.outputs) def test_agent_types_outputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs)) for output, output_type in zip(outputs, self.tool.outputs): agent_type = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(output, agent_type)) def test_agent_types_inputs(self): inputs = ["Hey, what's up?", "English", "Spanish"] _inputs = [] for _input, input_type in zip(inputs, self.tool.inputs): if isinstance(input_type, list): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error outputs = self.tool(*inputs) if not isinstance(outputs, list): outputs = [outputs] self.assertEqual(len(outputs), len(self.tool.outputs))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_speech_to_text.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available, load_tool from .test_tools_common import ToolTesterMixin if is_torch_available(): import torch class SpeechToTextToolTester(unittest.TestCase, ToolTesterMixin): def setUp(self): self.tool = load_tool("speech-to-text") self.tool.setup() def test_exact_match_arg(self): result = self.tool(torch.ones(3000)) self.assertEqual(result, " you") def test_exact_match_kwarg(self): result = self.tool(audio=torch.ones(3000)) self.assertEqual(result, " you")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fsdp/test_fsdp.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import unittest from functools import partial from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_accelerate, require_fsdp, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_1 from transformers.trainer import FSDP_MODEL_NAME else: is_torch_greater_or_equal_than_2_1 = False # default torch.distributed port DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] set_seed(42) params = list(itertools.product(sharding_strategies, dtypes)) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") require_fsdp_version = require_fsdp if is_accelerate_available(): from accelerate.utils.constants import ( FSDP_PYTORCH_VERSION, FSDP_SHARDING_STRATEGY, ) require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION) def get_launcher(distributed=False, use_accelerate=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. 
for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) if use_accelerate: return f"""accelerate launch --num_processes {num_gpus} --main_process_port {master_port} --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_state_dict_type SHARDED_STATE_DICT --fsdp_transformer_layer_cls_to_wrap BertLayer""".split() return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split() def _parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" @require_accelerate @require_torch_accelerator @require_fsdp_version class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.fsdp_config = { "backward_prefetch": "backward_pre", "forward_prefetch": "False", "limit_all_gathers": "False", "use_orig_params": "True", "sync_module_states": "True", "activation_checkpointing": "False", "min_num_params": 1, } def tearDown(self): super().tearDown() @parameterized.expand(params, name_func=_parameterized_custom_name_func) def test_fsdp_config(self, sharding_strategy, dtype): output_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "fsdp": f"{sharding_strategy} offload auto_wrap", "fsdp_config": self.fsdp_config, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) self.assertEqual(trainer.args.fsdp[0], sharding_strategy) self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD) self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP) for k, v in trainer.args.fsdp_config.items(): self.assertEqual(v, self.fsdp_config[k]) self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_basic_run(self, sharding_strategy, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"] fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(dtypes) @require_torch_multi_accelerator @slow @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.") def test_basic_run_with_cpu_offload(self, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"] fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = 
[f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) sharding_strategy = "full_shard" use_accelerate = state_dict_type == "SHARDED_STATE_DICT" launcher = get_launcher(True, use_accelerate=use_accelerate) args = self.get_base_args(output_dir, 2, 25).split() script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir) # resume from ckpt checkpoint = os.path.join(output_dir, "checkpoint-115") resume_args = args + f"--resume_from_checkpoint {checkpoint}".split() is_fsdp_ckpt = os.path.isdir(checkpoint) and ( # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used any( FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) # this checks the FSDP state dict when `FULL_STATE_DICT` is used or os.path.isfile(os.path.join(checkpoint, f"{FSDP_MODEL_NAME}.bin")) ) self.assertTrue(is_fsdp_ckpt) logs_resume = self.run_cmd_and_get_logs( use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir ) for log, log1 in zip(logs, logs_resume): if "learning_rate" in log: self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], delta=1e-5) def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir): if not use_accelerate: fsdp_args = [ "--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer", ] cmd = launcher + script + args + fsdp_args else: fsdp_config = f""" --fsdp_sharding_strategy {FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1} """.split() cmd = launcher + fsdp_config + script + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history return logs def get_base_args(self, output_dir, num_epochs, logging_steps): return f""" --model_name_or_path bert-base-cased --task_name mrpc --output_dir {output_dir} --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs {num_epochs} --lr_scheduler_type cosine --logging_steps {logging_steps} --save_strategy epoch --do_eval --evaluation_strategy epoch --report_to none """
0
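To make the launcher wiring above easier to follow, a hedged sketch of the two command lines `get_launcher` assembles, with a hard-coded process count and port standing in for the values the test derives at runtime from the available devices and `DS_TEST_PORT`.

num_processes = 2      # the test caps this at min(2, available accelerator devices)
master_port = "10999"  # the test reads DS_TEST_PORT or falls back to this default

torchrun_launcher = (
    f"torchrun --nnodes 1 --nproc-per-node {num_processes} --master-port {master_port}"
).split()

accelerate_launcher = (
    f"accelerate launch --num_processes {num_processes} --main_process_port {master_port} "
    "--use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP "
    "--fsdp_state_dict_type SHARDED_STATE_DICT --fsdp_transformer_layer_cls_to_wrap BertLayer"
).split()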
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tokenization/test_tokenization_fast.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" # Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment) self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) def test_tokenizer_mismatch_warning(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass def test_pretrained_model_lists(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_prepare_for_model(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_rust_tokenizer_signature(self): # PreTrainedTokenizerFast doesn't have tokenizer_file in its signature pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. 
for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("t5-base") # Enable padding tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: # Hack to save this in the tokenizer_config.json tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0 shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): # This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower. repo = "hf-internal-testing/test-two-tokenizers" # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 tokenizer = AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): # See https://github.com/huggingface/transformers/pull/12550 # and https://github.com/huggingface/tokenizers/issues/537 tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
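# A minimal, self-contained sketch (assumes the `tokenizers` package and network access to the
# public "t5-base" repo) of the two APIs exercised by the tests above: wrapping a raw
# `tokenizers.Tokenizer` into a PreTrainedTokenizerFast and retraining it from an iterator.
from tokenizers import Tokenizer
from transformers import PreTrainedTokenizerFast

raw_tokenizer = Tokenizer.from_pretrained("t5-base")
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=raw_tokenizer)

# Retrain on a toy corpus; the corpus and vocab_size are illustrative only.
toy_corpus = (line for line in ["hello world", "how are you"] * 100)
new_tokenizer = fast_tokenizer.train_new_from_iterator(toy_corpus, vocab_size=100, length=200)
print(new_tokenizer.encode("hello world"))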
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tokenization/test_tokenization_utils.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ isort:skip_file """ import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow if is_tokenizers_available(): from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are potentially correctly restored if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the keys are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") # Python no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"): 
self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", 
prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, 
tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
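# A minimal sketch (assumes network access to the "bert-base-cased" checkpoint) of the
# tokenizer.pad behaviour checked in test_padding_accepts_tensors above: pre-tokenized
# features of unequal length are padded into a rectangular batch of the requested tensor type.
import numpy as np
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
batch = tokenizer.pad(features, padding=True, return_tensors="np")
print(batch["input_ids"].tolist())  # [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]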
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/README.md
# Testing new Hugging Face Deep Learning Container.

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly on Monday. The code cutoff date for any changes is the Wednesday before the release Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate for the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC tag.

```
git+https://github.com/huggingface/[email protected] # install main or adjust it with vX.X.X for installing a version-specific transformers
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably take a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`.

To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`.

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
        datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe which tests we ran and with which package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see what information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably take a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x-x.yml`, where `x-x` is the base framework version, e.g. if pytorch 1.6.0 is the latest version in `buildspec.yaml` the file should be renamed to `buildspec-1-6.yml`.

To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`.

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.4.2
        datasets_version: &DATASETS_VERSION 1.5.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe which tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see what information is needed.
## Current Tests

| ID                                  | Description                                                   | Platform                    | #GPUS | Collected & evaluated metrics            |
|-------------------------------------|---------------------------------------------------------------|-----------------------------|-------|------------------------------------------|
| pytorch-transfromers-test-single    | test BERT finetuning using the transformers lib + PT          | SageMaker createTrainingJob | 1     | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-ddp     | test BERT finetuning using the transformers lib + PT DDP      | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-2-smd     | test BERT finetuning using the transformers lib + PT SM DDP   | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss |
| pytorch-transfromers-test-1-smp     | test RoBERTa finetuning using the transformers lib + PT SM MP | SageMaker createTrainingJob | 8     | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-single | test BERT finetuning using the transformers lib + TF          | SageMaker createTrainingJob | 1     | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transfromers-test-2-smd  | test BERT finetuning using the transformers lib + TF SM DDP   | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss |
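These test modules are skipped unless the `TEST_SAGEMAKER` environment variable evaluates to `True` (see the `pytest.mark.skipif` guards in `tests/sagemaker/`). Below is a minimal sketch of invoking them directly, assuming valid credentials for the selected AWS profile; the `make test-sagemaker` target used above wraps a similar pytest call, and the exact flags may differ.

```bash
export AWS_PROFILE=<enter-your-profile>
TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker
```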
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_model_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): # configuration for running training on smdistributed Model Parallel mpi_options = { "enabled": True, "processes_per_host": 8, } smp_options = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(1,)]) def test_scripz(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": 
eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/__init__.py
import importlib


def is_sagemaker_available():
    return importlib.util.find_spec("sagemaker") is not None
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_single_node_gpu.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class SingleNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count=1): # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def test_glue(self): # create estimator estimator = self.create_estimator() # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/conftest.py
# we define a fixture function below and it will be "used" by # referencing its name from tests import os import pytest from attr import dataclass os.environ["AWS_DEFAULT_REGION"] = "us-east-1" # defaults region @dataclass class SageMakerTestEnvironment: framework: str role = "arn:aws:iam::558105141721:role/sagemaker_execution_role" hyperparameters = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } distributed_hyperparameters = {**hyperparameters, "max_steps": 1000} @property def metric_definitions(self) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def base_job_name(self) -> str: return f"{self.framework}-transfromers-test" @property def test_path(self) -> str: return f"./tests/sagemaker/scripts/{self.framework}" @property def image_uri(self) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class") def sm_env(request): request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
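# A minimal sketch (the log line is invented for illustration) of how the metric_definitions
# regexes above are meant to be used: SageMaker applies each regex to the training job's
# console output and records the captured group as the metric value.
import re

sample_log_line = "eval_accuracy = 0.8421"
match = re.search(r"eval_accuracy.*=\D*(.*?)$", sample_log_line)
print(match.group(1))  # -> "0.8421", collected by SageMaker as the eval_accuracy metric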
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_data_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( # Trainer,; TrainingArguments, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) # Will import SageMaker Model parallelism specific Trainer from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if training_args.should_log else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if training_args.should_log: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset("glue", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files datasets = load_dataset("csv", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. 
# Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
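# Hedged illustration (not part of run_glue.py above; the checkpoint name and example sentences
# are assumptions): when pad_to_max_length is False, the script tokenizes with padding=False and
# lets DataCollatorWithPadding pad each batch to its longest member, rounded up to a multiple of 8
# under fp16 so tensor cores stay efficient.
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
features = [
    tokenizer("a short sentence"),
    tokenizer("a noticeably longer example sentence that determines the padded length"),
]
batch = collator(features)
print(batch["input_ids"].shape)  # padded to the longest sequence in this batch, rounded up to a multiple of 8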
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install from main, or replace @main with @vX.X.X to install a specific transformers version
datasets==1.8.0
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_ddp.py
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" try: subprocess.run(cmd, shell=True) except Exception as e: logger.info(e) if __name__ == "__main__": main()
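# Hedged sketch (the argument values below are made up): parse_args() above registers every
# unknown "--flag value" pair it receives, and the launcher forwards them to run_glue.py by
# turning each namespace attribute back into " --<name> <value>" and concatenating the pieces.
example_args = {"model_name_or_path": "bert-base-cased", "epochs": 3}
forwarded = "".join(f" --{parameter} {value}" for parameter, value in example_args.items())
print(forwarded)  # " --model_name_or_path bert-base-cased --epochs 3"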
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install from main, or replace @main with @vX.X.X to install a specific transformers version
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf.py
import argparse import logging import sys import time import tensorflow as tf from datasets import load_dataset from transformers import AutoTokenizer, TFAutoModelForSequenceClassification if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. parser.add_argument("--epochs", type=int, default=1) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) args, _ = parser.parse_known_args() # overwrite batch size until we have tf_glue.py args.per_device_train_batch_size = 16 args.per_device_eval_batch_size = 16 # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # Load dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) train_dataset = train_dataset.shuffle().select(range(5000)) # smaller the size for train dataset to 5k test_dataset = test_dataset.shuffle().select(range(500)) # smaller the size for test dataset to 500 # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])).batch( args.per_device_train_batch_size ) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])).batch( args.per_device_eval_batch_size ) # fine optimizer and loss optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) start_train_time = time.time() train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.per_device_train_batch_size) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") for key, value in train_results.history.items(): logger.info(f" {key} = {value}")
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py
import argparse import logging import os import sys import time import tensorflow as tf from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, TFAutoModelForSequenceClassification from transformers.utils import is_sagemaker_dp_enabled if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled(): SDP_ENABLED = True os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge" import smdistributed.dataparallel.tensorflow as sdp else: SDP_ENABLED = False def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None): pbar = tqdm(train_dataset) for i, batch in enumerate(pbar): with tf.GradientTape() as tape: inputs, targets = batch outputs = model(batch) loss_value = loss(targets, outputs.logits) if SDP_ENABLED: tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True) grads = tape.gradient(loss_value, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) pbar.set_description(f"Loss: {loss_value:.4f}") if SDP_ENABLED and i == 0: sdp.broadcast_variables(model.variables, root_rank=0) sdp.broadcast_variables(opt.variables(), root_rank=0) if max_steps and i >= max_steps: break train_results = {"loss": loss_value.numpy()} return train_results def get_datasets(tokenizer, train_batch_size, eval_batch_size): # Load dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])) if SDP_ENABLED: tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank()) tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank()) tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True) tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True) return tf_train_dataset, tf_test_dataset if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. 
parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) parser.add_argument("--max_steps", type=int, default=None) # Data, model, and output directories parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) args, _ = parser.parse_known_args() # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) if SDP_ENABLED: sdp.init() gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU") # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # get datasets tf_train_dataset, tf_test_dataset = get_datasets( tokenizer=tokenizer, train_batch_size=args.per_device_train_batch_size, eval_batch_size=args.per_device_eval_batch_size, ) # fine optimizer and loss optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # Training if args.do_train: # train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size) start_train_time = time.time() train_results = fit( model, loss, optimizer, tf_train_dataset, args.epochs, args.per_device_train_batch_size, max_steps=args.max_steps, ) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") output_eval_file = os.path.join(args.output_dir, "train_results.txt") if not SDP_ENABLED or sdp.rank() == 0: with open(output_eval_file, "w") as writer: logger.info("***** Train results *****") logger.info(train_results) for key, value in train_results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Evaluation if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0): result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True) logger.info("*** Evaluate ***") output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") logger.info(result) for key, value in result.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Save result if SDP_ENABLED: if sdp.rank() == 0: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) else: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import List, Union from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from torch import nn from transformers.generation import ( EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, ) from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor @require_torch class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return scores def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 5), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) @parameterized.expand([(0,), ([0, 18],)]) def test_new_min_length_dist_processor(self, eos_token_id: Union[int, List[int]]): vocab_size = 20 batch_size = 4 # check that first input is skipped (min new length applying) input_ids = ids_tensor((batch_size, 5), vocab_size=20) new_min_dist_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=input_ids.shape[-1], min_new_tokens=3, eos_token_id=eos_token_id ) expected_eos_scores_before_min_length = batch_size * [-float("inf")] if isinstance(eos_token_id, list): expected_eos_scores_before_min_length *= len(eos_token_id) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check 
that, for skipping, now prompt length is 5, after that we expect first 5 tokens will be skipped self.assertTrue(new_min_dist_processor.prompt_length_to_skip == 5) # check that min length is applied at length 2 input_ids = ids_tensor((batch_size, 2), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 6 (because it has only 1 new token) input_ids = ids_tensor((batch_size, 6), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 7 (because it has only 2 new tokens) input_ids = ids_tensor((batch_size, 7), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is not applied anymore at length 8 input_ids = ids_tensor((batch_size, 8), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) # check that min new length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch # compute softmax probs = nn.functional.softmax(scores, dim=-1) temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1) warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1) # uniform distribution stays uniform self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) scores = 
rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2) def test_encoder_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor(penalty=2.0, encoder_input_ids=input_ids) scores = rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) * 2) # check that values not in the encoder ids were NOT changed self.assertAlmostEqual(scores[0, 2].item(), (1 / vocab_size)) self.assertAlmostEqual(scores[1, 2].item(), (1 / vocab_size)) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = ( torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) ) ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits) # check that correct tokens are filtered self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) scores = top_k_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) scores = top_k_warp_safety_check(input_ids, ramp_logits) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float) ) top_p_warp = TopPLogitsWarper(0.8) filtered_dist = torch.exp(top_p_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 
2 tokens are kept top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_typical_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float) ) typical_warp = TypicalLogitsWarper(0.5) filtered_dist = torch.exp(typical_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= 0.7 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3) scores = typical_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = typical_warp(input_ids, ramp_logits) # first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_epsilon_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor( [[0.87, 0.099, 0.001, 0.03], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) ) epsilon_warp = EpsilonLogitsWarper(0.1) filtered_dist = torch.exp(epsilon_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= 0.1 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept epsilon_warp = EpsilonLogitsWarper(5e-2, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = epsilon_warp(input_ids, ramp_logits) # first batch should keep 3 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. 
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_eta_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.0, 0.1, 0.8, 0.1], [0.01, 0.04, 0.9, 0.05]], device=torch_device, dtype=torch.float) ) eta_warp = EtaLogitsWarper(0.0625) filtered_dist = torch.exp(eta_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= min(0.0625, sqrt(0.0625) * e^-H(p)) # min(0.0625, 0.1320) is the cutoff for the first row and min(0.0625, 0.1644) is for the second # where H is the entropy function and p is the probability vector. # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept eta_warp = EtaLogitsWarper(0.1, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = eta_warp(input_ids, ramp_logits) # first batch should keep 2 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]]) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]] ) def test_encoder_no_repeat_ngram_dist_processor(self): vocab_size = 3 num_beams = 2 batch_size = 1 encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 1st and 2nd token at 1st beam and 1st token (0) at 2nd beam self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]]) # 3-gram would forbid 1st token at 1st beam and no token at 2nd beam self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]] ) # Batched input vocab_size = 3 
num_beams = 2 batch_size = 2 encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2gram # Batch 1 # - Beam 1: tokens (1, 2) forbidden # - Beam 2: tokens (1) forbidden # Batch 2 # - Beam 1: tokens (0, 2) forbidden # - Beam 2: tokens (1) forbidden self.assertListEqual( torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False], [True, False, True], [False, True, False]], ) # Batch 1 # - Beam 1: tokens (1) forbidden # - Beam 2: tokens () forbidden # Batch 2 # - Beam 1: tokens (2) forbidden # - Beam 2: tokens () forbidden self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False], [False, False, True], [False, False, False]], ) def test_no_bad_words_dist_processor(self): vocab_size = 5 batch_size = 2 eos_token_id = 4 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 1, 2) token are forbidden # Note that 5th element cannot be forbidden as it is EOS token self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]] ) # check edge case no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3)) def test_bias_dist_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) positive_bias = {(1,): 100.0, (4,): 100.0} negative_bias = {(1, 0): -100.0, (0, 1, 2): -100.0, (1, 3, 1, 3): -100.0} # biases the same termination twice, to ensure we can handle overlapping terminations (it won't have an effect # on the test cases, though) negative_bias.update({(1, 3, 1, 3, 1, 3): -100.0}) sequence_bias = {**positive_bias, **negative_bias} # scores = 0 to facilitate checks scores = torch.zeros((batch_size, vocab_size), dtype=torch.float, device=torch_device) bias_dist_proc = SequenceBiasLogitsProcessor(sequence_bias=sequence_bias) filtered_scores = bias_dist_proc(input_ids, scores.clone()) # batch 1: positive bias: tokens (1, 4); negative bias: tokens (0, 3); neutral: tokens (2) # batch 2: positive bias: tokens (1, 4); negative bias: tokens (0, 2); neutral: tokens (3) self.assertListEqual( filtered_scores.tolist(), [[-100.0, 100.0, 0.0, -100.0, 100.0], [-100.0, 100.0, -100.0, 0.0, 100.0]] ) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores 
input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.clone() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.clone() # instantiate all dist processors min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TopKLogitsWarper(3) top_p_warp = TopPLogitsWarper(0.8) no_repeat_proc = NoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) # no processor list scores = min_dist_proc(input_ids, scores) scores = temp_dist_warp(input_ids, scores) scores = rep_penalty_proc(input_ids, scores) scores = top_k_warp(input_ids, scores) scores = top_p_warp(input_ids, scores) scores = no_repeat_proc(input_ids, scores) scores = no_bad_words_dist_proc(input_ids, scores) # with processor list processor = LogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp) # scores should be equal self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_prefix_constrained_logits_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) def prefix_allowed_tokens_fn(batch_id, inputs_ids): return [[0, 1], [2, 3]][batch_id] prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1) filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone()) # batch 1: 1st, 2nd (0, 1) token are allowed # batch 2: 3rd, 4th (2, 3) token are allowed self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]] ) def empty_prefix_allowed_tokens_fn(batch_id, inputs_ids): return [] prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(empty_prefix_allowed_tokens_fn, 1) self.assertRaises(ValueError, prefix_constrained_logits_proc, input_ids, scores.clone()) def test_hamming_diversity(self): vocab_size = 4 num_beams = 2 num_beam_groups = 2 scores = self._get_uniform_logits(num_beams, vocab_size) # batch_idx = 0 -> index batch_idx * num_beam_groups -> idx = 0 * 2 = 0 -> penalises tokens 1 # batch_idx = 1 -> index batch_idx * num_beam_groups -> idx = 1 * 2 = 2 -> penalises tokens 1 current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long) diversity_logits_processor = HammingDiversityLogitsProcessor( diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups ) processed_scores = diversity_logits_processor(None, scores, current_tokens, 1) self.assertTrue( torch.allclose( processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3 ) ) self.assertTrue( torch.allclose( processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3 ) ) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) # check that all scores are -inf except the bos_token_id score input_ids = ids_tensor((batch_size, 1), vocab_size=20) scores 
= self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 input_ids = ids_tensor((batch_size, 4), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertFalse(torch.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) # check that all scores are -inf except the eos_token_id when max_length-1 is reached input_ids = ids_tensor((batch_size, 4), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length-1 is not reached input_ids = ids_tensor((batch_size, 3), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertFalse(torch.isinf(scores).any()) def test_remove_nan_inf_logits_processor(self): scores = torch.tensor( [[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device ) input_ids = ids_tensor((2, 4), vocab_size=20) logits_processor = InfNanRemoveLogitsProcessor() scores = logits_processor(input_ids, scores) self.assertTrue( torch.allclose( scores, torch.tensor( [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]], device=torch_device, ), atol=1e-6, ) ) def test_exponential_decay_length_penalty(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 penalty_start = 5 penalty_factor = 1.1 input_ids = ids_tensor((batch_size, 2), vocab_size=vocab_size) input_ids_seq_length = input_ids.shape[-1] length_decay_processor = ExponentialDecayLengthPenalty( exponential_decay_length_penalty=(penalty_start, penalty_factor), eos_token_id=eos_token_id, input_ids_seq_length=input_ids_seq_length, ) # check that penalty is not applied before start scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_start = torch.clone(scores) # clone scores as precessor updates them inplace scores_before_start = length_decay_processor(input_ids, scores_before_start) self.assertListEqual(scores_before_start[:, eos_token_id].tolist(), scores[:, eos_token_id].tolist()) # check that penalty is applied after start input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size) scores = self._get_uniform_logits(batch_size, vocab_size) scores_after_start = torch.clone(scores) # clone scores as precessor updates them inplace scores_after_start = length_decay_processor(input_ids, scores_after_start) self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all()) # check the penalty increases negative scores input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size) scores = torch.neg(self._get_uniform_logits(batch_size, vocab_size)) scores_after_start = torch.clone(scores) # clone scores as precessor updates them inplace scores_after_start = length_decay_processor(input_ids, scores_after_start) 
self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all()) def test_normalization(self): input_ids = None scores = torch.tensor( [[-23.18, -29.96, -43.54, 47.77], [-33.58, -26.87, -32.96, 22.51]], device=torch_device, dtype=torch.float ) logit_normalization = LogitNormalization() normalized_scores = logit_normalization(input_ids, scores).exp() ones = torch.ones(scores.shape[0], device=torch_device, dtype=torch.float) self.assertTrue(normalized_scores.sum(dim=-1).allclose(ones)) self.assertTrue(normalized_scores.allclose(scores.softmax(dim=-1))) def test_classifier_free_guidance(self): class Namespace(dict): pass logits_uncond = torch.tensor([[[1.0, 0, 1.5]]]) logits_cond = torch.tensor([[[1.0, 1.0, 1.0]]]) def dummy_model(input_ids, attention_mask, use_cache=True, past_key_values=None): out = Namespace() out.logits = logits_uncond out.past_key_values = None return out def lsm(x): return torch.nn.functional.log_softmax(x, dim=-1) # explicit unconditional prompt + attention mask input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor( 1.5, dummy_model, input_ids, torch.ones_like(input_ids, dtype=torch.long) ) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) # explicit unconditional prompt input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model, input_ids) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) # all implicit input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) def test_early_stop_processor(self): input_ids = None eos_token_id = 2 min_eos_p = 0.1 ## some small float scores = self._get_uniform_logits(2, 4) scores[0][eos_token_id] = -6 ## less than log(min_eos_p) esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p) actual_scores = esp(input_ids, scores) expected_scores_list = [ scores[0].tolist(), [float("-inf"), float("-inf"), scores[0][0], float("-inf")], ] self.assertListEqual(actual_scores.tolist(), expected_scores_list) def test_early_stop_processor_multi_eos(self): input_ids = None eos_token_id = [2, 3] min_eos_p = 0.1 ## some small float scores = self._get_uniform_logits(2, 4) scores[0][eos_token_id] = -6 ## less than log(min_eos_p) esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p) actual_scores = esp(input_ids, scores) expected_scores_list = [ scores[0].tolist(), [float("-inf"), float("-inf"), scores[0][0], scores[0][0]], ] self.assertListEqual(actual_scores.tolist(), expected_scores_list)
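# Hedged usage sketch (the tensor shapes and parameter values are illustrative, not taken from the
# tests above): outside of the unit tests, these processors are usually chained with
# LogitsProcessorList and applied to next-token scores exactly as generate() does internally.
import torch
from transformers import (
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    TemperatureLogitsWarper,
    TopKLogitsWarper,
)

processors = LogitsProcessorList(
    [
        MinLengthLogitsProcessor(min_length=10, eos_token_id=0),
        TemperatureLogitsWarper(temperature=0.7),
        TopKLogitsWarper(top_k=50),
    ]
)
input_ids = torch.tensor([[5, 3, 8]])  # (batch_size, sequence_length)
scores = torch.randn(1, 100)  # (batch_size, vocab_size)
filtered_scores = processors(input_ids, scores)  # same call signature exercised in the tests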
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_utils.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxGenerationTesterMixin: model_tester = None all_generative_model_classes = () def _get_input_ids_and_config(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = inputs["input_ids"].shape[-1] // 2 input_ids = inputs["input_ids"][:max_batch_size, :sequence_length] attention_mask = jnp.ones_like(input_ids) attention_mask = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens max_length = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def test_greedy_generate_pt_fx(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.decoder_start_token_id = 0 for model_class in self.all_generative_model_classes: flax_model = model_class(config) pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params) flax_generation_outputs = flax_model.generate(input_ids).sequences pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def test_greedy_generate(self): config, input_ids, _, 
max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_num_return_sequences(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 config.num_return_sequences = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def test_sample_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length config.temperature = 0.8 config.top_k = 10 config.top_p = 0.3 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.num_beams = 2 config.min_length = 1 config.forced_bos_token_id = 8 
config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.num_beams = 2 config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class FlaxGenerationIntegrationTests(unittest.TestCase): def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
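# Hedged usage sketch mirroring the pattern exercised above (the tiny checkpoints are the same ones
# used by the integration test; any Flax causal-LM checkpoint should behave the same way): wrapping
# model.generate in jax.jit compiles the whole decoding loop once, so repeated calls with
# identically shaped inputs avoid retracing.
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

input_ids = tokenizer("Hello world", return_tensors="np").input_ids
jit_generate = jit(model.generate)
sequences = jit_generate(input_ids).sequences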
0
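Not part of the repository file above: the Flax generation tests all follow one pattern, namely configure generation on the model, generate eagerly, then XLA-compile `model.generate` with `jax.jit` and check that both paths return identical sequences. Below is a minimal standalone sketch of that pattern; the "gpt2" checkpoint name and the max length of 16 are assumptions made only for this illustration.

from jax import jit

from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

# assumed checkpoint: any causal LM with Flax weights on the Hub would do
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello world", return_tensors="np").input_ids

# keep generation greedy and short, mirroring the tests above
model.generation_config.do_sample = False
model.generation_config.max_length = 16

eager_sequences = model.generate(input_ids).sequences
jit_sequences = jit(model.generate)(input_ids).sequences

# greedy decoding is deterministic, so the XLA-compiled path must match exactly
assert eager_sequences.tolist() == jit_sequences.tolist()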
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_constraints.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! 
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
0
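Not part of the test file above: a small standalone sketch that walks the `DisjunctiveConstraint` state machine the tests exercise. The token ids are arbitrary values chosen for illustration.

from transformers.generation import DisjunctiveConstraint

# either [1, 2, 3] or [1, 2, 4] fulfills the constraint
dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])

for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
    print(dc.current_seq, stepped, completed, reset)
# [1] True False False
# [1, 2] True False False
# [1, 2, 4] True True False   -> dc.completed is now True

dc.reset()
assert dc.remaining() == 3  # back to needing a full branch again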
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from parameterized import parameterized from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.generation import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from ..test_modeling_tf_common import ids_tensor @require_tf class TFLogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = tf.ones((batch_size, length), dtype=tf.float32) / length return scores @parameterized.expand([(False,), (True,)]) def test_min_length_dist_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) if use_xla: min_dist_processor = tf.function(min_dist_processor, jit_compile=True) # check that min length is applied at length 5 cur_len = 5 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].numpy().tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 cur_len = 15 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(scores_before_min_length)).numpy()) @parameterized.expand([(False,), (True,)]) def test_temperature_dist_warper(self, use_xla): input_ids = None cur_len = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.numpy() scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch scores = tf.convert_to_tensor(scores) # compute softmax probs = tf.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = TFTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TFTemperatureLogitsWarper(temperature=1.3) if use_xla: temp_dist_warper_sharper = tf.function(temp_dist_warper_sharper, jit_compile=True) temp_dist_warper_smoother = tf.function(temp_dist_warper_smoother, jit_compile=True) warped_prob_sharp = tf.nn.softmax(temp_dist_warper_sharper(input_ids, tf.identity(scores), cur_len), axis=-1) warped_prob_smooth = tf.nn.softmax(temp_dist_warper_smoother(input_ids, tf.identity(scores), cur_len), axis=-1) # 
uniform distribution stays uniform tf.debugging.assert_near(probs[0, :], warped_prob_sharp[0, :], atol=1e-3) tf.debugging.assert_near(probs[0, :], warped_prob_smooth[0, :], atol=1e-3) # sharp peaks get higher, valleys get lower self.assertLess(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_sharp[1, :])) # smooth peaks get lower, valleys get higher self.assertGreater(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_smooth[1, :])) self.assertLess(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_smooth[1, :])) @parameterized.expand([(False,), (True,)]) def test_repetition_penalty_dist_process(self, use_xla): vocab_size = 10 cur_len = 2 input_ids = tf.constant([[0, 1], [5, 0]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size=2, length=vocab_size) mask = tf.cast(tf.constant([[1] + 9 * [0], 10 * [0]]), tf.bool) scores = tf.where(mask, -1 / vocab_size, scores) mask = tf.cast(tf.constant([10 * [0], 5 * [0] + [1] + 4 * [0]]), tf.bool) scores = tf.where(mask, 4 / vocab_size, scores) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) if use_xla: rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) scores = rep_penalty_proc(input_ids, tf.identity(scores), cur_len) # check that values were correctly changed (negative scores for used tokens should increase, others # should decrease) self.assertAlmostEqual(scores[0, 0].numpy(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change self.assertAlmostEqual(scores[1, 0].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].numpy(), (4 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change @parameterized.expand([(False,), (True,)]) def test_top_k_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size, dtype=np.float32), (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TFTopKLogitsWarper(3) if use_xla: top_k_warp = tf.function(top_k_warp, jit_compile=True) scores = top_k_warp(input_ids, ramp_logits, cur_len) # check that correct tokens are filtered self.assertListEqual(tf.math.is_inf(scores[0]).numpy().tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(tf.math.is_inf(scores[1]).numpy().tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TFTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) if use_xla: top_k_warp_safety_check = tf.function(top_k_warp_safety_check, jit_compile=True) scores = top_k_warp_safety_check(input_ids, logits, cur_len) # uniform dist is not changed self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [0, 0]) ramp_logits = np.broadcast_to(np.arange(length, dtype=np.float32), (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), 
axis=-1).numpy().tolist(), [2, 2]) @parameterized.expand([(False,), (True,)]) def test_top_p_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TFTopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) # top_p should have been 0.8 to test the edge case of top_p being exactly equal to sum of some token prob # However, due to the numerical instability of softmax in TF we choose this as the edge case # top_p as 0.8 passes when use_xla is True and fails when False. Refer PR #18984. top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to( np.arange(vocab_size, dtype=np.float32)[None, :], (batch_size, vocab_size) ).copy() - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = TFTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps # 2. self.assertListEqual( tf.math.reduce_sum(tf.where(filtered_dist != 0.0, 1, 0), axis=-1).numpy().tolist(), [3, 2] ) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 cur_len = 4 input_ids = tf.constant([[1, 1, 2, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = TFNoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = TFNoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, tf.identity(scores), cur_len) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, tf.identity(scores), cur_len) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_2_gram).numpy().tolist(), [[False, True, True], [True, False, False]] ) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_3_gram).numpy().tolist(), [[False, False, False], [True, False, False]] ) @parameterized.expand([(False,), (True,)]) def test_no_bad_words_dist_processor(self, use_xla): vocab_size = 5 batch_size = 2 eos_token_id = 4 cur_len = 4 input_ids = tf.constant([[0, 1, 3, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) if use_xla: no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) filtered_scores = no_bad_words_dist_proc(input_ids, tf.identity(scores), cur_len) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 
1, 2) token are forbidden self.assertListEqual( tf.math.is_inf(filtered_scores).numpy().tolist(), [[True, True, False, True, True], [True, True, True, False, True]], ) @parameterized.expand([(False,), (True,)]) def test_forced_bos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the bos_token_id score cur_len = 1 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, bos_token_id + 1 :]) & (scores[:, bos_token_id + 1 :] < 0)) ) self.assertListEqual(scores[:, bos_token_id].numpy().tolist(), 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_forced_eos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = TFForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the eos_token_id when max_length-1 is reached cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, eos_token_id + 1 :]) & (scores[:, eos_token_id + 1 :] < 0)) ) self.assertListEqual( scores[:, eos_token_id].numpy().tolist(), 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length-1 is not reached cur_len = 3 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_at_begin_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 begin_suppress_tokens = [1, 2, 3] begin_index = 5 logits_processor = TFSuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index ) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that no scores are suppressed if begin_index is not reached cur_len = 4 input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) # Check that scores are suppressed if begin_index is reached cur_len = 5 input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) 
self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 suppress_tokens = [1, 3, 5] keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that suppress_tokens are suppressed and others are not cur_len = 5 input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_force_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 force_token_map = {1: 2, 3: 2} logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that if the cur_len is contained in the force_token_map, the logits are the same # for all tokens except the one the force_token_map points to cur_len = 1 input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] self.assertTrue( tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, [non_forced_inds], axis=1))), ) # check that if the cur_len is not contained in the force_token_map, the logits are not modified cur_len = 2 input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): # TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor when it gets compatible with XLA batch_size = 4 cur_len = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores input_ids = ids_tensor((batch_size, cur_len), vocab_size) input_ids_comp = tf.identity(input_ids) scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = tf.identity(scores) # instantiate all dist processors min_dist_proc = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TFTemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TFTopKLogitsWarper(3) top_p_warp = TFTopPLogitsWarper(0.8) # no_repeat_proc = TFNoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) if use_xla: min_dist_proc = tf.function(min_dist_proc, jit_compile=True) temp_dist_warp = tf.function(temp_dist_warp, jit_compile=True) rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) top_k_warp = tf.function(top_k_warp, jit_compile=True) top_p_warp = tf.function(top_p_warp, jit_compile=True) # 
no_repeat_proc = tf.function(no_repeat_proc, jit_compile=True) no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) # no processor list scores = min_dist_proc(input_ids, scores, cur_len) scores = temp_dist_warp(input_ids, scores, cur_len) scores = rep_penalty_proc(input_ids, scores, cur_len) scores = top_k_warp(input_ids, scores, cur_len) scores = top_p_warp(input_ids, scores, cur_len) # scores = no_repeat_proc(input_ids, scores, cur_len) scores = no_bad_words_dist_proc(input_ids, scores, cur_len) # with processor list processor = TFLogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, # no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp, cur_len) # remove inf scores = tf.where(tf.math.is_inf(scores), -1e9, scores) scores_comp = tf.where(tf.math.is_inf(scores_comp), -1e9, scores_comp) # scores should be equal tf.debugging.assert_near(scores, scores_comp, atol=1e-3) # input_ids should never be changed self.assertListEqual(input_ids.numpy().tolist(), input_ids_comp.numpy().tolist())
0
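Not part of the test file above: the TF processors share a `(input_ids, scores, cur_len)` call signature, which is what lets the parameterized tests wrap each one in `tf.function(..., jit_compile=True)`. A minimal sketch of chaining a few of them through `TFLogitsProcessorList`, using arbitrary toy shapes assumed only for this illustration:

import tensorflow as tf

from transformers.generation import (
    TFLogitsProcessorList,
    TFMinLengthLogitsProcessor,
    TFTemperatureLogitsWarper,
    TFTopKLogitsWarper,
)

batch_size, vocab_size, cur_len = 2, 20, 5
input_ids = tf.random.uniform((batch_size, cur_len), maxval=vocab_size, dtype=tf.int32)
scores = tf.ones((batch_size, vocab_size)) / vocab_size  # uniform logits, as in the tests

processors = TFLogitsProcessorList(
    [
        TFMinLengthLogitsProcessor(min_length=10, eos_token_id=0),
        TFTemperatureLogitsWarper(temperature=0.7),
        TFTopKLogitsWarper(top_k=3),
    ]
)
scores = processors(input_ids, scores, cur_len)

# each processor is also XLA-compilable on its own, which is what the parameterized tests check
xla_top_k = tf.function(TFTopKLogitsWarper(top_k=3), jit_compile=True)
xla_scores = xla_top_k(input_ids, tf.ones((batch_size, vocab_size)) / vocab_size, cur_len)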
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_configuration_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import warnings from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class GenerationConfigTest(unittest.TestCase): @parameterized.expand([(None,), ("foo.json",)]) def test_save_load_config(self, config_name): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, config_name=config_name) loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.temperature, 0.7) self.assertEqual(loaded_config.length_penalty, 1.0) self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k, 50) self.assertEqual(loaded_config.max_length, 20) self.assertEqual(loaded_config.max_time, None) def test_from_model_config(self): model_config = AutoConfig.from_pretrained("gpt2") generation_config_from_model = GenerationConfig.from_model_config(model_config) default_generation_config = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(generation_config_from_model, default_generation_config) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id) def test_update(self): generation_config = GenerationConfig() update_kwargs = { "max_new_tokens": 1024, "foo": "bar", } update_kwargs_copy = copy.deepcopy(update_kwargs) unused_kwargs = generation_config.update(**update_kwargs) # update_kwargs was not modified (no side effects) self.assertEqual(update_kwargs, update_kwargs_copy) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens, 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(unused_kwargs, {"foo": "bar"}) def test_initialize_new_kwargs(self): generation_config = GenerationConfig() generation_config.foo = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo, "bar") generation_config = GenerationConfig.from_model_config(new_config) assert not hasattr(generation_config, "foo") # no new kwargs should be initialized if from config def 
test_kwarg_init(self): """Tests that we can overwrite attributes at `from_pretrained` time.""" default_config = GenerationConfig() self.assertEqual(default_config.temperature, 1.0) self.assertEqual(default_config.do_sample, False) self.assertEqual(default_config.num_beams, 1) config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) self.assertEqual(config.temperature, 0.7) self.assertEqual(config.do_sample, True) self.assertEqual(config.num_beams, 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0) self.assertEqual(loaded_config.temperature, 1.0) self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.num_beams, 1) # default value def test_validate(self): """ Tests that the `validate` method is working as expected. Note that `validate` is called at initialization time """ # Case 1: A correct configuration will not throw any warning with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig() self.assertEqual(len(captured_warnings), 0) # Case 2: Inconsequent but technically wrong configuration will throw a warning (e.g. setting sampling # parameters with `do_sample=False`). May be escalated to an error in the future. with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig(temperature=0.5) self.assertEqual(len(captured_warnings), 1) # Case 3: Impossible sets of contraints/parameters will raise an exception with self.assertRaises(ValueError): GenerationConfig(num_return_sequences=2) # Case 4: Passing `generate()`-only flags to `validate` will raise an exception with self.assertRaises(ValueError): GenerationConfig(logits_processor="foo") # Case 5: Model-specific parameters will NOT raise an exception or a warning with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig(foo="bar") self.assertEqual(len(captured_warnings), 0) def test_refuse_to_save(self): """Tests that we refuse to save a generation config that fails validation.""" # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning that # is caught, doesn't save, and raises an exception config = GenerationConfig() config.temperature = 0.5 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as exc: config.save_pretrained(tmp_dir) self.assertTrue("Fix these issues to save the configuration." in str(exc.exception)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # greedy decoding throws an exception if we try to return multiple sequences -> throws an exception that is # caught, doesn't save, and raises a warning config = GenerationConfig() config.num_return_sequences = 2 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as exc: config.save_pretrained(tmp_dir) self.assertTrue("Fix these issues to save the configuration." 
in str(exc.exception)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # final check: no warnings/exceptions thrown if it is correct, and file is saved config = GenerationConfig() with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 0) self.assertTrue(len(os.listdir(tmp_dir)) == 1) @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org") except HTTPError: pass def test_push_to_hub(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("test-generation-config", token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-generation-config", push_to_hub=True, token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("valid_org/test-generation-config-org", token=self._token) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, token=self._token ) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k))
0
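Not part of the test file above: a short sketch of the `GenerationConfig` round trip those tests cover, including the load-time override checked by `test_kwarg_init` and the unused-kwargs behavior checked by `test_update`. A temporary directory stands in for a real save location.

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, top_k=10)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    # attributes can be overridden at load time
    reloaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

assert reloaded.do_sample is True
assert reloaded.temperature == 1.0

# `.update()` applies known attributes and hands back the unknown ones
unused = reloaded.update(max_new_tokens=64, foo="bar")
assert reloaded.max_new_tokens == 64
assert unused == {"foo": "bar"}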
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_search.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score 
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): # check too many eos tokens beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) # check all batches are done beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) # beam scorer should be done self.parent.assertTrue(beam_scorer.is_done) # check beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first 
next_scores[0, 0] = 0.0 beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = 
ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) 
output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 # for testing finalize, we do want to have fulfilled constraints stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, 
-1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
0
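Not part of the test file above: a condensed sketch of the `process`/`finalize` handshake that `BeamSearchTester` drives, using random tensors in place of real model scores. Shapes, token ids, and the out-of-vocabulary eos id are assumptions made only for this illustration.

import torch

from transformers.generation import BeamSearchScorer

batch_size, num_beams, seq_len, vocab_size = 2, 4, 10, 99
eos_token_id, pad_token_id = vocab_size + 1, 0  # eos outside the vocab so no beam finishes early

scorer = BeamSearchScorer(batch_size=batch_size, num_beams=num_beams, device="cpu")

input_ids = torch.randint(vocab_size, (batch_size * num_beams, seq_len))
# `generate` hands the scorer the 2 * num_beams best candidates per batch item
next_scores, _ = torch.randn(batch_size, 2 * num_beams).sort(descending=True)
next_tokens = torch.randint(vocab_size, (batch_size, 2 * num_beams))
next_indices = torch.randint(num_beams, (batch_size, 2 * num_beams))

out = scorer.process(
    input_ids, next_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id
)
# reorder the running beams and append the chosen tokens, as the tester does
input_ids = torch.cat([input_ids[out["next_beam_indices"], :], out["next_beam_tokens"].unsqueeze(-1)], dim=-1)

sequence_output = scorer.finalize(
    input_ids,
    out["next_beam_scores"],
    out["next_beam_tokens"],
    out["next_beam_indices"],
    max_length=seq_len + 2,
    pad_token_id=pad_token_id,
    eos_token_id=eos_token_id,
)
print(sequence_output["sequences"].shape)  # (batch_size * num_beam_hyps_to_keep, <= max_length)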
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest import warnings import numpy as np from parameterized import parameterized from transformers import is_torch_available, pipeline, set_seed from transformers.testing_utils import ( is_flaky, require_accelerate, require_torch, require_torch_multi_accelerator, slow, torch_device, ) from ..test_modeling_common import floats_tensor, ids_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, AutoTokenizer, BartForCausalLM, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, SpeechEncoderDecoderModel, top_k_top_p_filtering, ) from transformers.cache_utils import DynamicCache from transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput, GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, RepetitionPenaltyLogitsProcessor, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) class GenerationTesterMixin: model_tester = None all_generative_model_classes = () input_name = "input_ids" def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], "repetition_penalty": 
1.2, "remove_invalid_values": True, } # NoRepeatNGramLogitsProcessor + forced tokens may result in no valid continuations if forced_bos_token_id is None and forced_eos_token_id is None: process_kwargs["no_repeat_ngram_size"] = 2 # NOTE: the order of operations here should match `generate` for accurate testing logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( [ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id)] + ( [NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"])] if forced_bos_token_id is None and forced_eos_token_id is None else [] ) + [RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"])] + [InfNanRemoveLogitsProcessor()] # prevent flaky generation test failures ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder 
= model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, 
output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, 
GenerateDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, 
logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, GenerateEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, GenerateDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=2) def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) # check `generate()` and `beam_search()` are equal output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) def test_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = 
self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, GenerateBeamEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, GenerateBeamDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, 
num_return_sequences=beam_scorer.num_beams ) @require_accelerate @require_torch_multi_accelerator def test_model_parallel_beam_search(self): for model_class in self.all_generative_model_classes: if model_class._no_split_modules is None: continue config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) new_model = model_class.from_pretrained(tmp_dir, device_map="auto") new_model.generate( input_ids, attention_mask=attention_mask, max_length=max_length, num_beams=2, ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_sample = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, GenerateBeamEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, GenerateBeamDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_beam_sample, 
BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all 
shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, GenerateBeamEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, GenerateBeamDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 ) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints min_id = 3 max_id = config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, 
max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints min_id = 3 max_id = model.config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, GenerateBeamEncoderDecoderOutput) self.assertIsInstance(output_generate, GenerateBeamEncoderDecoderOutput) # Retrocompatibility check self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, GenerateBeamDecoderOnlyOutput) self.assertIsInstance(output_generate, GenerateBeamDecoderOnlyOutput) # Retrocompatibility check self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) 
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_contrastive_generate_low_memory(self): # Check that choosing 'low_memory' does not change the model output for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer", "speech2text"]): self.skipTest("Won't fix: old model with different cache format") if any(model_name in model_class.__name__.lower() for model_name in ["gptbigcode"]): self.skipTest("TODO: fix me") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) # NOTE: contrastive search only works with cache on at the moment. 
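            # Rough sketch of the user-facing call this test exercises (argument values mirror the ones
            # used below; `model` stands for any decoder-only model with caching enabled):
            #   model.generate(input_ids, top_k=4, penalty_alpha=0.6, low_memory=True)
            # `low_memory=True` is expected to score the top-k candidates in smaller chunks to reduce peak
            # memory, without changing which tokens are selected -- which is what the equality check
            # against `low_memory=False` below verifies.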
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True # test output equality of low versus high memory model = model_class(config).to(torch_device).eval() low_output = model.generate( input_ids, top_k=4, penalty_alpha=0.6, low_memory=True, max_length=max_length, attention_mask=attention_mask, ) high_output = model.generate( input_ids, top_k=4, penalty_alpha=0.6, low_memory=False, max_length=max_length, attention_mask=attention_mask, ) self.assertListEqual(low_output.tolist(), high_output.tolist()) @is_flaky() # Read NOTE (1) below. If there are API issues, all attempts will fail. def test_assisted_decoding_matches_greedy_search(self): # This test ensures that the assisted generation does not introduce output changes over greedy search. # NOTE (1): The sentence above is true most of the time, there is a tiny difference in the logits due to matmul # shape differences -- and it may result in a different output. The input shape difference happens in the # main model, that runs the forward pass with several candidates at once (as opposed to generating one token at # a time). See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for more info. # NOTE (2): It breaks the pattern in the tests above, for multiple reasons: # - assisted_decoding, contrarily to the other methods, can't be called on its own (e.g. needs to # prepare the assistant encoder outputs in the main generate body); # - assisted_decoding does not support `use_cache = False` # - assisted_decoding does not support `batch_size > 1` for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
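            # For reference, the user-facing pattern validated here looks roughly like this (the
            # checkpoint name is a placeholder, not something this test loads):
            #   assistant = AutoModelForCausalLM.from_pretrained("<small-draft-checkpoint>")
            #   model.generate(input_ids, assistant_model=assistant, do_sample=False)
            # The assistant drafts candidate tokens and the main model verifies them in a single forward
            # pass, so greedy outputs should match unassisted greedy search (up to the matmul shape caveat
            # in NOTE (1) above). Below, the model is used as its own assistant to keep the test
            # self-contained.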
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": False, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_greedy = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs.update({"assistant_model": assistant_model}) output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) # The two outputs must match and their shape must be as expected self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) for output in (output_greedy, output_assisted): self._check_outputs(output, input_ids, model.config, use_cache=True) @is_flaky() def test_prompt_lookup_decoding_matches_greedy_search(self): # This test ensures that the prompt lookup generation does not introduce output changes over greedy search. # This test is mostly a copy of test_assisted_decoding_matches_greedy_search for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
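            # Prompt lookup decoding is enabled purely through a generation kwarg, e.g.:
            #   model.generate(input_ids, prompt_lookup_num_tokens=2, do_sample=False)
            # Candidate continuations are copied from n-grams that already occur in the prompt instead of
            # coming from a separate assistant model, so the comparison against greedy search below runs
            # the exact same model on the exact same inputs.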
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the prompt lookup tries to give the model 2 tokens, to ensure the input preparation of # prompt lookup is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": False, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_greedy = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) generation_kwargs.update({"prompt_lookup_num_tokens": 2}) # see b) output_prompt_lookup = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) # The two outputs must match and their shape must be as expected self.assertListEqual(output_greedy.sequences.tolist(), output_prompt_lookup.sequences.tolist()) for output in (output_greedy, output_prompt_lookup): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_assisted_decoding_sample(self): # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
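            # Loose sketch of what is exercised here: the model acts as its own assistant and samples, e.g.
            #   model.generate(input_ids, do_sample=True, assistant_model=model, max_new_tokens=4)
            # Only the output structure is checked (via `_check_outputs`), since seeded assisted sampling
            # cannot be compared token-for-token with plain sampling, as explained above.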
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": True, "assistant_model": assistant_model, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) self._check_outputs(output_assisted, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue model = model_class(config).to(torch_device) head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def test_left_padding_compatibility(self): # The check done in this test is fairly difficult -- depending on the model architecture, passing the right # position index for the position embeddings can still result in a different output, due to numerical masking. # On the other hand, for some types of position embeddings, an incorrect position index can have a minimal # impact on the output. # There are two tricks employed to check whether left-padding compatibility is in place: # 1 - To reduce the negative impact of the numerical attention mask on a correct position index, we set the # padding size to 1. # 2 - To reduce the chance of false positives (i.e. passing when it should be failing), we run the check # multiple times with random inputs, and it has to pass with all of them. 
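        # A minimal worked example of the convention checked below (toy values, assuming pad_token_id=0):
        # for a prompt [[5, 6, 7]] left-padded by one token,
        #   padded_input_ids      -> [[0, 5, 6, 7]]
        #   padded_attention_mask -> [[0, 1, 1, 1]]
        #   position_ids = cumsum(mask, -1) - 1, then masked_fill(mask == 0, 1) -> [[1, 0, 1, 2]]
        # i.e. the real tokens keep positions 0, 1, 2, so the last-token logits should be unaffected by
        # the padding (up to numerical effects of the attention mask).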
# NOTE: because of 2), there is some chance of false positives in this test. for model_class in self.all_generative_model_classes: config, _, _, _ = self._get_input_ids_and_config() if config.is_encoder_decoder: continue # skip for encoder-decoder models -- they don't need left-padding compatibility model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() no_failures = True for _ in range(10): # there may be false positives with 10 runs, we rely on the CI to catch the flakiness _, input_ids, attention_mask, _ = self._get_input_ids_and_config() model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :] pad_size = (input_ids.shape[0], 1) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = {"input_ids": padded_input_ids, "attention_mask": padded_attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(padded_attention_mask, dim=-1) - 1 position_ids.masked_fill_(padded_attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_with_padding = model(**model_kwargs).logits[:, -1, :] if not torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-7): no_failures = False break self.assertTrue(no_failures) def test_past_key_values_format(self): # Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test. Having a # standard KV cache format is important for a consistent API (and for advanced generation methods). for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: self.skipTest("This model doesn't return `past_key_values`") num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "decoder_attention_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) # Encoder-Decoder checks if config.is_encoder_decoder: encoder_num_attention_heads = config.encoder_attention_heads encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads batch_size, seq_length = inputs["decoder_input_ids"].shape for i in range(num_hidden_layers): self.assertEqual(len(past_kv[i]), 4) # K V for the decoder + K V for the encoder = 4 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) # The sequence length for the encoder K V depends on the model. Since it is not manipulated in # autoregressive generation, I'm keeping the test general and not checking the 3rd dim self.assertEqual( (past_kv[i][2].shape[0], past_kv[i][2].shape[1], past_kv[i][2].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) self.assertEqual( (past_kv[i][3].shape[0], past_kv[i][3].shape[1], past_kv[i][3].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) # Decoder-only checks else: # TODO: this line is only needed because of imagegpt, where "pixel_values" = "input_ids". 
Fix the # tests in imagegpt such that `prepare_config_and_inputs_for_common` returns the later (and the other # tests use it) key = "input_ids" if "input_ids" in inputs else "pixel_values" batch_size, seq_length = inputs[key].shape for i in range(num_hidden_layers): self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) def test_generate_from_inputs_embeds_decoder_only(self): # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids` # if fails, you should probably update the `prepare_inputs_for_generation` function for model_class in self.all_generative_model_classes: config, input_ids, _, _ = self._get_input_ids_and_config() # Ignore: # a) eos (to always output 20 tokens) and pad (so we don't try to infer the attn mask from the input_ids, # which would cause a mismatch), config.pad_token_id = config.eos_token_id = -1 # b) embedding scaling, the scaling factor applied after embeding from input_ids (requires knowledge of the # variable that holds the scaling factor, which is model-dependent) if hasattr(config, "scale_embedding"): config.scale_embedding = False # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the # decoder) if config.is_encoder_decoder: continue # Skip models without explicit support model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys(): continue # Traditional way of generating text outputs_from_ids = model.generate(input_ids) self.assertEqual(outputs_from_ids.shape, (2, 20)) # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output) inputs_embeds = model.get_input_embeddings()(input_ids) outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds) self.assertListEqual(outputs_from_ids.tolist(), outputs_from_embeds.tolist()) # But if we pass different inputs_embeds, we should get different outputs torch.manual_seed(0) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate(input_ids, inputs_embeds=random_embeds) with self.assertRaises(AssertionError): self.assertListEqual(outputs_from_rand_embeds.tolist(), outputs_from_embeds.tolist()) # input_ids is not a required input -- if we don't pass it, the newly generated tokens will be the same outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, max_new_tokens=20 - inputs_embeds.shape[1] ) self.assertListEqual( outputs_from_embeds[:, inputs_embeds.shape[1] :].tolist(), outputs_from_embeds_wo_ids[:, 1:].tolist(), ) def test_generate_continue_from_past_key_values(self): # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): self.skipTest("Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest("TODO: needs modeling or test input preparation fixes for compatibility") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") # Let's make 
it always: # 1. use cache (for obvious reasons) # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the # continuation would force it to generate beyond an EOS token) # 3. ignore `token_type_ids` for simplicity # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is # active by default on some models config.use_cache = True if "token_type_ids" in inputs: del inputs["token_type_ids"] model = model_class(config).to(torch_device) model.eval() model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest("This model doesn't return `past_key_values`") # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True) # Continue from the tokens generated above, preparing the inputs accordingly inputs["past_key_values"] = outputs_cached.past_key_values new_attention_len = outputs_cached.sequences.shape[-1] if config.is_encoder_decoder: inputs["decoder_input_ids"] = outputs_cached.sequences if "decoder_attention_mask" in inputs: inputs["decoder_attention_mask"] = torch.nn.functional.pad( inputs["decoder_attention_mask"], (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]), mode="constant", value=1, ) else: inputs["input_ids"] = outputs_cached.sequences if "attention_mask" in inputs: inputs["attention_mask"] = torch.nn.functional.pad( inputs["attention_mask"], (0, new_attention_len - inputs["attention_mask"].shape[1]), mode="constant", value=1, ) outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True) # The two sets of generated text and past kv should be equal to each other self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist()) for layer_idx in range(len(outputs_cached.past_key_values)): for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], outputs_cached.past_key_values[layer_idx][kv_idx], ) ) @parameterized.expand([(1, False), (1, True), (4, False)]) def test_new_cache_format(self, num_beams, do_sample): # Tests that generating with the new format is exactly the same as the legacy one (for models that support it). # 👉 tests with and without beam search so that we can test with and without cache reordering. # 👉 tests with and without sampling so we can cover the most common use cases. 
for model_class in self.all_generative_model_classes: if not model_class._supports_cache_class: self.skipTest("This model does not support the new cache format") config, input_ids, attention_mask, _ = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() generation_kwargs = { "max_new_tokens": 5, "do_sample": do_sample, "num_beams": num_beams, "num_return_sequences": num_beams, "return_dict_in_generate": True, # Required to return `past_key_values` } # Sets seed before calling `generate` for the case with do_sample=True seed = torch.randint(0, 1000000, (1,)).item() set_seed(seed) legacy_results = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) set_seed(seed) new_results = model.generate( input_ids, attention_mask=attention_mask, past_key_values=DynamicCache(), **generation_kwargs ) # The two sets of generated sequences must match, despite the cache format between forward passes being # different self.assertListEqual(legacy_results.sequences.tolist(), new_results.sequences.tolist()) self.assertTrue(isinstance(legacy_results.past_key_values, tuple)) self.assertTrue(isinstance(new_results.past_key_values, DynamicCache)) # The contents of the two caches, when converted to the same format (in both directions!), must match legacy_cache = legacy_results.past_key_values new_cache_converted = new_results.past_key_values.to_legacy_cache() for layer_idx in range(len(legacy_cache)): for kv_idx in range(len(legacy_cache[layer_idx])): self.assertTrue( torch.allclose( legacy_cache[layer_idx][kv_idx], new_cache_converted[layer_idx][kv_idx], ) ) new_cache = new_results.past_key_values legacy_cache_converted = DynamicCache.from_legacy_cache(legacy_results.past_key_values) for layer_idx in range(len(new_cache)): for kv_idx in range(len(new_cache[layer_idx])): self.assertTrue( torch.allclose( new_cache[layer_idx][kv_idx], legacy_cache_converted[layer_idx][kv_idx], ) ) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no 
use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Past Key Value States -- two notes here: # 1. Its inner sequence length is with respect to the inputs of the latest forward pass, hence the "-1" # 2. Some old models still return `output.past_key_values` even without `use_cache=True` # 3. TODO (joao): A few models have different formats, skipping those until the cache refactor is complete models_without_standard_cache = ("bloom", "ctrl", "fsmt", "gptbigcode", "mega", "reformer") has_standard_cache = not any( model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache ) if use_cache and has_standard_cache: past_key_values = output.past_key_values past_sequence_length = output.sequences.shape[-1] - 1 self._check_past_key_values_for_generate( num_sequences_in_output, past_key_values, seq_length=past_sequence_length, config=config, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( 
[layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config, num_beam_groups=1): self.assertIsInstance(past_key_values, tuple) self.assertListEqual( [isinstance(iter_past_key_values, tuple) for iter_past_key_values in past_key_values], [True] * len(past_key_values), ) # (batch, head, seq_length, head_features) expected_shape = ( batch_size * num_beam_groups, config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads, seq_length, config.hidden_size // config.num_attention_heads, ) # check shape key, value self.assertListEqual( [layer_past_key_values[0].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) self.assertListEqual( [layer_past_key_values[1].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_torch_available(): framework_dependent_parameters = { "AutoModelForCausalLM": AutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": AutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM, "AutoModelForVision2Seq": AutoModelForVision2Seq, "LogitsProcessorList": LogitsProcessorList, "MinLengthLogitsProcessor": MinLengthLogitsProcessor, "create_tensor_fn": torch.tensor, "floats_tensor": floats_tensor, "return_tensors": "pt", } @slow def test_diverse_beam_search(self): # PT-only test: TF doesn't have a diverse beam search implementation article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. "Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. 
The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( 
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria & group beam search article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, 
num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_custom_stopping_criteria_overload_error(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_generate_non_nlp_input_ids_as_kwarg(self): # PT-only test: AFAIK there's no non-NLP model architecture in TF that supports `input_ids` as its only input model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_values_as_encoder_kwarg(self): # PT-only test: AFAIK there's no generate-capable architecture in TF that supports `input_values` as its input input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = 
model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): # PT-only test: TF doesn't have group beam search articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, diversity_penalty=1.0, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_beam_search_example_integration(self): # PT-only test: TF doesn't have a BeamSearchScorer # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The 
soldiers were not prepared and didn't know what to do. They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_cfg_mixin(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") input = tokenizer(["The dragon flew over Paris,"], return_tensors="pt", return_attention_mask=True) input["input_ids"] = input["input_ids"].to(torch_device) input["attention_mask"] = input["attention_mask"].to(torch_device) outputs = model.generate(**input, max_new_tokens=32, guidance_scale=1.5) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The dragon flew over Paris, landing in the Rue de la Bastille. 
The crowd was so excited " 'that they had to leave the city.\n\n"We\'re going to Paris!"\n' ], ) neg = tokenizer(["France,"], return_tensors="pt", return_attention_mask=True) neg["input_ids"] = neg["input_ids"].to(torch_device) neg["attention_mask"] = neg["attention_mask"].to(torch_device) outputs = model.generate( **input, max_new_tokens=32, guidance_scale=1.5, negative_prompt_ids=neg["input_ids"], negative_prompt_attention_mask=neg["attention_mask"], ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ 'The dragon flew over Paris, landing on the pavement.\n\n"Paris!"\n\n"Paris!"\n\n"' 'Paris!"\n\n"Paris!"\n\n"Paris!"\n\n' ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" 
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # PT-only test: TF doesn't have constrained beam search # Tests that contrastive search works with batched inputs (i.e. has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 20 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) # Only some seeds will work both on CPU/GPU for a fixed `expectation` value. # The selected seed is not guaranteed to work on all torch versions. 
torch.manual_seed(1) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(1) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar") def test_default_max_length_warning(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Default generation config value of 20 -> emits warning with self.assertWarns(UserWarning): model.generate(input_ids) # Explicitly setting max_length to 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: model.generate(input_ids, max_length=20) self.assertEqual(len(warning_list), 0) # Generation config max_length != 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: # generation_config is modified -> legacy mode is disabled = generation_config takes precedence model.generation_config.max_length = 10 model.generate(input_ids) self.assertEqual(len(warning_list), 0) def test_model_kwarg_assisted_decoding_decoder_only(self): # PT-only test: TF doesn't support assisted decoding yet. 
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with token_type_ids outputs_tti = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), ) with self.assertRaises(AssertionError): self.assertListEqual(outputs_tti.tolist(), outputs_normal.tolist()) # Assistant model assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) assistant.config.pad_token_id = tokenizer.eos_token_id # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_tti.tolist()) def test_model_kwarg_assisted_decoding_encoder_decoder(self): """ Tests that the following scenario is compatible with assisted generation: 1. encoder-decoder main model 2. encoder-decoder assistant model 3. both have a custom input (e.g. Whisper) """ # PT-only test: TF doesn't support assisted decoding yet. # Bart subclass with a kwarg that distorts the output class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, past_key_values, foo=False, **kwargs): outs = super().forward(input_ids, past_key_values=past_key_values, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate(input_ids, foo=True) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = assistant.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, assistant_encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) def test_assisted_decoding_encoder_decoder_shared_encoder(self): 
""" Tests that the following scenario is compatible with assisted generation: 1. encoder-decoder main model 2. decoder-only assistant model 3. both have a custom input (e.g. DistilWhisper) """ # PT-only test: TF doesn't support assisted decoding yet. # Bart subclass with a kwarg called foo that distorts the output class FakeBartSeq2Seq(BartForConditionalGeneration): def forward(self, input_ids, foo=False, **kwargs): outs = super().forward(input_ids, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs class FakeBartCausalLM(BartForCausalLM): def forward(self, input_ids, attention_mask, past_key_values, foo=False, **kwargs): outs = super().forward(input_ids, attention_mask, past_key_values=past_key_values, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBartSeq2Seq.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate(input_ids, foo=True) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = FakeBartCausalLM.from_pretrained( "hf-internal-testing/tiny-random-BartForConditionalGeneration" ).to(torch_device) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = model.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist())
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_streamers.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class StreamerTester(unittest.TestCase): def test_text_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) # The greedy text should be printed to stdout, except for the final "\n" in the streamer streamer_text = cs.out[:-1] self.assertEqual(streamer_text, greedy_text) def test_iterator_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) streamer = TextIteratorStreamer(tokenizer) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() streamer_text = "" for new_text in streamer: streamer_text += new_text self.assertEqual(streamer_text, greedy_text) def test_text_streamer_skip_prompt(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) new_greedy_ids = greedy_ids[:, input_ids.shape[1] :] new_greedy_text = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_prompt=True) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) # The greedy text should be printed to stdout, except for the final "\n" in the streamer streamer_text = cs.out[:-1] self.assertEqual(streamer_text, new_greedy_text) def test_text_streamer_decode_kwargs(self): # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_special_tokens=True) model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token streamer_text = cs.out[:-1] # Remove the final "\n" streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1)) def test_iterator_streamer_timeout(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) streamer = TextIteratorStreamer(tokenizer, timeout=0.001) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(Empty): streamer_text = "" for new_text in streamer: streamer_text += new_text
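A minimal usage sketch of the iterator-streamer pattern exercised by the tests above, assuming the same tiny test checkpoint; the prompt, timeout, and token budget are placeholder values. generate() blocks, so it runs in a worker thread while the main thread consumes decoded text chunks.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)

# run generation in the background; the streamer yields text pieces as they are produced
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()

generated_text = ""
for new_text in streamer:  # raises queue.Empty if the timeout elapses between tokens
    generated_text += new_text
thread.join()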
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_logits_process.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = jnp.ones((batch_size, length)) / length return scores def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch scores = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch # compute softmax probs = jax.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1) warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = FlaxTopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits, cur_len=None) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special case length = 5 top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 
0.0).sum(axis=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) top_p_warp = FlaxTopPLogitsWarper(0.8) filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2]) def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 20), vocab_size=20) cur_len = 5 scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 scores = self._get_uniform_logits(batch_size, vocab_size) cur_len = 15 scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores_before_min_length).any()) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) # check that all scores are -inf except the bos_token_id score input_ids = ids_tensor((batch_size, 1), vocab_size=20) cur_len = 1 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) # check that all scores are -inf except the eos_token_id when max_length is reached input_ids = ids_tensor((batch_size, 4), vocab_size=20) cur_len = 4 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero 
# check that eos_token_id is not forced if max_length is not reached cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) # with processor list processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores_comp = processor(input_ids, scores_comp, cur_len=cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_processor_list_jitted(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list def run_no_processor_list(input_ids, scores, cur_len): scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) return scores # with processor list def run_processor_list(input_ids, scores, cur_len): processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores = processor(input_ids, scores, cur_len=cur_len) return scores jitted_run_no_processor_list = jax.jit(run_no_processor_list) jitted_run_processor_list = jax.jit(run_processor_list) scores = 
jitted_run_no_processor_list(input_ids, scores, cur_len) scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
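A small sketch, under the same Flax setup, of how the individual warpers tested above compose into one FlaxLogitsProcessorList call; the uniform scores are dummy values and the temperature / top-k / top-p settings are illustrative.

import jax.numpy as jnp

from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

batch_size, vocab_size = 2, 10
scores = jnp.ones((batch_size, vocab_size)) / vocab_size  # dummy uniform logits

processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(3), FlaxTopPLogitsWarper(0.9)]
)
# input_ids and cur_len are unused by these warpers, mirroring the tests above
warped_scores = processor(input_ids=None, scores=scores, cur_len=None)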
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_framework_agnostic.py
""" Framework agnostic tests for generate()-related methods. """ import numpy as np from transformers import AutoTokenizer from transformers.testing_utils import slow, torch_device class GenerationIntegrationTestsMixin: # To be populated by the child classes framework_dependent_parameters = { "AutoModelForCausalLM": None, "AutoModelForSpeechSeq2Seq": None, "AutoModelForSeq2SeqLM": None, "AutoModelForVision2Seq": None, "LogitsProcessorList": None, "MinLengthLogitsProcessor": None, "create_tensor_fn": None, "floats_tensor": None, "return_tensors": None, "set_seed": None, } def test_validate_generation_inputs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors=return_tensors).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs) # however, valid model_kwargs are accepted valid_model_kwargs = {"attention_mask": create_tensor_fn(np.zeros_like(input_ids))} model.generate(input_ids, **valid_model_kwargs) def test_custom_logits_processor(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] logits_processor_list_cls = self.framework_dependent_parameters["LogitsProcessorList"] min_length_logits_processor_cls = self.framework_dependent_parameters["MinLengthLogitsProcessor"] return_tensors = self.framework_dependent_parameters["return_tensors"] bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1) input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids logits_processor = logits_processor_list_cls() logits_processor.append(min_length_logits_processor_cls(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart") input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: bart_model = bart_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 
bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 29 (input length) + 3 new tokens self.assertEqual(list(outputs.shape), [1, 33]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_max_new_tokens_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake.""" gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: gpt2_model = gpt2_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_encoder_decoder_generate_with_inputs_embeds(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_transition_scores_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648], [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def 
test_transition_scores_greedy_search_normalized(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835], [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() 
outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake", "Michael Phelps", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-gpt2", max_length=10, num_beams=4, num_return_sequences=2, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_sample_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", do_sample=True, max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): # This is an aggressive test that makes sure that `beam_search's` # transition scores are computed correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1` # 2 x input_ids for "question: How are you? 
\n context: I had a long day, " model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_ids = create_tensor_fn(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]) model = model_cls.from_pretrained("t5-small") if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_scores( sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices ) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores)) def test_encoder_decoder_generate_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extreme generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5 ) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors=return_tensors).input_ids input_ids_batched = tokenizer(articles, padding=True, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) input_ids_batched = input_ids_batched.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores if is_pt: batched_out = batched_out.cpu().numpy() out = out.cpu().numpy() diff = np.abs(np.sum(batched_out[:5]) - np.sum(out)) self.assertTrue(diff < 1e-4) def test_generate_input_ids_as_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 15)) def test_generate_input_ids_as_encoder_kwarg(self): model_cls = 
self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids=input_ids, inputs_embeds=input_ids) def test_generate_input_features_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5) output_sequences = model.generate(input_features, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_pixel_values_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") model.generation_config.eos_token_id = None if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = 
model.generate(pixel_values=pixel_values, max_length=5) output_sequences = model.generate(pixel_values, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_encoder_outputs_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) attention_mask = create_tensor_fn(np.ones(input_features.shape)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features = input_features.to(torch_device) attention_mask = attention_mask.to(torch_device) model = model.to(torch_device) encoder = model.get_encoder() encoder_outputs = encoder(input_features) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs) output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) if is_pt: output_sequences_no_mask = output_sequences_no_mask.cpu().numpy() output_sequences_with_mask = output_sequences_with_mask.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_no_mask, output_sequences_with_mask)) def test_eos_token_id_int_and_list_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, } expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_eos_token_id_int_and_list_contrastive_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, "penalty_alpha": 0.6, "top_k": 4, } expectation = 17 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 225 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [225, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def 
test_eos_token_id_int_and_list_beam_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 3, } expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) def test_generate_vision2text_conditioning(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) conditioning_input = create_tensor_fn([[10], [10]]) # this should be the 2nd output token, after the BOS token model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) conditioning_input = conditioning_input.to(torch_device) # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as # decoder_input_ids, if the encoder is not a model with text input) output_sequences_decoder_input_ids = model.generate( pixel_values, max_length=5, decoder_input_ids=conditioning_input ) output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input) if is_pt: output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy() output_sequences_input_ids = output_sequences_input_ids.cpu().numpy() conditioning_input = conditioning_input.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids)) self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], conditioning_input))
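A PyTorch-flavored sketch of the transition-scores API that several of these mixin tests exercise, assuming the same tiny GPT-2 test checkpoint; the prompt is a placeholder.

import numpy as np

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Justin Timberlake", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=5,
    do_sample=False,
    return_dict_in_generate=True,
    output_scores=True,
)

# one score per generated token; normalize_logits converts them to log-probabilities
transition_scores = model.compute_transition_scores(
    outputs.sequences, outputs.scores, normalize_logits=True
)
print(np.round(transition_scores[0].numpy(), 4))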
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_stopping_criteria.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class StoppingCriteriaTestCase(unittest.TestCase): def _get_tensors(self, length): batch_size = 3 vocab_size = 250 input_ids = ids_tensor((batch_size, length), vocab_size) scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return input_ids, scores def test_list_criteria(self): input_ids, scores = self._get_tensors(5) criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ] ) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_length_criteria(self): criteria = MaxLengthCriteria(max_length=10) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_new_tokens_criteria(self): criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) criteria_list = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length, 10) def test_max_time_criteria(self): input_ids, scores = self._get_tensors(5) criteria = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(input_ids, scores)) criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(input_ids, scores)) def test_validate_stopping_criteria(self): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10) with self.assertWarns(UserWarning): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11) stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11) self.assertEqual(len(stopping_criteria), 1)
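A minimal sketch of wiring the criteria above into generate(); the checkpoint is the same tiny test model, and the length and time limits are placeholder values.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# stop at 20 total tokens or after ~0.5 s of generation, whichever comes first
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=0.5)])
outputs = model.generate(**inputs, stopping_criteria=criteria)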
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeq2SeqLM, TFAutoModelForSpeechSeq2Seq, TFAutoModelForVision2Seq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p_filtering function behaves as expected def test_top_k_top_p_filtering(self): logits = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ], dtype=tf.float32, ) non_inf_expected_idx = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, ) # expected non filtered idx as noted above non_inf_expected_output = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, ) # expected non filtered values as noted above output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")] non_inf_idx = tf.cast( tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32, ) tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12) tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx) @require_tf class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): framework_dependent_parameters = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM, "AutoModelForVision2Seq": TFAutoModelForVision2Seq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def test_generate_tf_function_export_fixed_input_length(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_length = 2 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((None, input_length), tf.int32, name="input_ids"), tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2, 0], [102, 103]] dummy_attention_masks = [[1, 0], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for batch_size in range(1, len(dummy_input_ids) + 1): inputs = { "input_ids": tf.constant(dummy_input_ids[:batch_size]), "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow def test_generate_tf_function_export_fixed_batch_size(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") batch_size = 1 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"), tf.TensorSpec((batch_size, None), tf.int32, 
name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2], [102, 103]] dummy_attention_masks = [[1], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for input_row in range(len(dummy_input_ids)): inputs = { "input_ids": tf.constant([dummy_input_ids[input_row]]), "attention_mask": tf.constant([dummy_attention_masks[input_row]]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow @require_tensorflow_text def test_generate_tf_function_export_with_tf_tokenizer(self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir) class CompleteSentenceTransformer(tf.keras.layers.Layer): def __init__(self): super().__init__() self.tokenizer = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read() ) self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") def call(self, inputs, *args, **kwargs): tokens = self.tokenizer.tokenize(inputs) input_ids, attention_mask = text.pad_model_inputs( tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id ) outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask) return self.tokenizer.detokenize(outputs) complete_model = CompleteSentenceTransformer() inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs") outputs = complete_model(inputs) keras_model = tf.keras.Model(inputs, outputs) keras_model.save(tmp_dir) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has PT equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 14 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="tf") model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") eos_token_id = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [638, 198] with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has PT equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="tf").input_ids bart_model = 
TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart") output = bart_model.generate(input_ids).numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(TFBartForConditionalGeneration): def call(self, input_ids, foo=None, **kwargs): return super().call(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart") fake_output = bart_model.generate(input_ids, foo="bar").numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def call(self, input_ids, **kwargs): return super().call(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).numpy() with self.assertRaises(ValueError): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
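A TF-side sketch of the eos_token_id-as-list behaviour covered above, with the same tiny checkpoint; the prompt, sampling settings, and the two candidate eos token ids are illustrative.

import tensorflow as tf

from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello, my dog is cute and", return_tensors="tf")

tf.random.set_seed(0)
# generation stops as soon as either of the two candidate eos ids is sampled
outputs = model.generate(**inputs, do_sample=True, top_k=10, eos_token_id=[638, 198])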
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/deepspeed/ds_config_zero3.json
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/deepspeed/vit_feature_extractor.json
{ "feature_extractor_type": "ViTFeatureExtractor", "size": 30 }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/deepspeed/test_deepspeed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import io import itertools import json import os import unittest from copy import deepcopy from functools import partial import datasets from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import AutoModel, TrainingArguments, is_torch_available, logging from transformers.integrations.deepspeed import ( HfDeepSpeedConfig, is_deepspeed_available, unset_hf_deepspeed_config, ) from transformers.testing_utils import ( CaptureLogger, CaptureStd, CaptureStderr, LoggingLevel, TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_deepspeed, require_optuna, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_utils import get_last_checkpoint, set_seed from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_available_on_device if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") set_seed(42) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "t5-small" T5_TINY = "patrickvonplaten/t5-tiny-random" GPT2_TINY = "sshleifer/tiny-gpt2" def load_json(path): with open(path) as f: return json.load(f) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. 
Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base def require_deepspeed_aio(test_case): """ Decorator marking a test that requires deepspeed aio (nvme) """ if not is_deepspeed_available(): return unittest.skip("test requires deepspeed")(test_case) import deepspeed from deepspeed.ops.aio import AsyncIOBuilder if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]: return unittest.skip("test requires deepspeed async-io")(test_case) else: return test_case if is_deepspeed_available(): from deepspeed.utils import logger as deepspeed_logger # noqa from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from transformers.integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled # noqa def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() ZERO2 = "zero2" ZERO3 = "zero3" FP16 = "fp16" BF16 = "bf16" HF_OPTIM = "hf_optim" HF_SCHEDULER = "hf_scheduler" DS_OPTIM = "ds_optim" DS_SCHEDULER = "ds_scheduler" optims = [HF_OPTIM, DS_OPTIM] schedulers = [HF_SCHEDULER, DS_SCHEDULER] stages = [ZERO2, ZERO3] if is_torch_bf16_available_on_device(torch_device): dtypes = [FP16, BF16] else: dtypes = [FP16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, dtypes)) params_with_optims_and_schedulers = list(itertools.product(stages, dtypes, optims, schedulers)) @require_deepspeed @require_torch_accelerator class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon): """ Testing non-Trainer DeepSpeed integration """ def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } def tearDown(self): super().tearDown() # reset the ds config global so that tests state doesn't leak unset_hf_deepspeed_config() def test_init_zero3_fp16(self): # test that zero.Init() works correctly under zero3/fp16 ds_config = { "train_batch_size": 1, "zero_optimization": { "stage": 3, }, } dschf = HfDeepSpeedConfig(ds_config) self.assertTrue(dschf.is_zero3()) self.assertTrue(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: AutoModel.from_pretrained(T5_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) # now remove zero optimization del ds_config["zero_optimization"] dschf = HfDeepSpeedConfig(ds_config) self.assertFalse(dschf.is_zero3()) 
self.assertFalse(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: AutoModel.from_pretrained(T5_TINY) self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out) def test_init_zero3_missing_params(self): # test that zero.Init() for missing parameters works correctly under zero3 import deepspeed import torch from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel class TinyGPT2WithUninitializedWeights(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = AutoModel.from_pretrained(GPT2_TINY, config=config) self.new_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=True) def forward(self, *args, **kwargs): transformer_outputs = self.transformer(*args, **kwargs) hidden_states = transformer_outputs[0] return self.new_head(hidden_states).float() def _init_weights(self, module): super()._init_weights(module) if module is self.new_head: self.new_head.weight.data.fill_(-100.0) self.new_head.bias.data.fill_(+100.0) ds_config = { "train_batch_size": 1, "zero_optimization": { "stage": 3, }, } dschf = HfDeepSpeedConfig(ds_config) self.assertTrue(dschf.is_zero3()) self.assertTrue(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = TinyGPT2WithUninitializedWeights.from_pretrained(GPT2_TINY) self.assertIn("Detected DeepSpeed ZeRO-3", cl.out) self.assertRegex(cl.out, r"newly initialized.*new_head\.bias.*new_head\.weight") with deepspeed.zero.GatheredParameters([model.new_head.weight, model.new_head.bias]): self.assertTrue( torch.allclose(model.new_head.weight, torch.tensor(-100.0, device=model.new_head.weight.device)), ) self.assertTrue( torch.allclose(model.new_head.bias, torch.tensor(+100.0, device=model.new_head.bias.device)), ) # now remove zero optimization del ds_config["zero_optimization"] dschf = HfDeepSpeedConfig(ds_config) self.assertFalse(dschf.is_zero3()) self.assertFalse(is_deepspeed_zero3_enabled()) with LoggingLevel(logging.INFO): with mockenv_context(**self.dist_env_1_gpu): logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: model = TinyGPT2WithUninitializedWeights.from_pretrained(GPT2_TINY) self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out) self.assertRegex(cl.out, r"newly initialized.*new_head\.bias.*new_head\.weight") self.assertTrue( torch.allclose(model.new_head.weight, torch.tensor(-100.0, device=model.new_head.weight.device)), ) self.assertTrue( torch.allclose(model.new_head.bias, torch.tensor(+100.0, device=model.new_head.bias.device)), ) class TrainerIntegrationDeepSpeedWithCustomConfig(TestCasePlus): def setUp(self): super().setUp() args = TrainingArguments(".") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.ds_config_file = { "zero2": f"{self.test_file_dir_str}/ds_config_zero2.json", "zero3": f"{self.test_file_dir_str}/ds_config_zero3.json", } # use self.get_config_dict(stage) to use these to ensure the original is not modified with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f: config_zero2 = json.load(f) with 
io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: config_zero3 = json.load(f) # The following setting slows things down, so don't enable it by default unless needed by a test. # It's in the file as a demo for users since we want everything to work out of the box even if slower. config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False self.ds_config_dict = { "zero2": config_zero2, "zero3": config_zero3, } def tearDown(self): super().tearDown() # reset the ds config global so that tests state doesn't leak unset_hf_deepspeed_config() def get_config_dict(self, stage): # As some tests modify the dict, always make a copy return deepcopy(self.ds_config_dict[stage]) @require_deepspeed @require_torch_accelerator class TrainerIntegrationDeepSpeed(TrainerIntegrationDeepSpeedWithCustomConfig, TrainerIntegrationCommon): """ This class is for testing directly via get_regression_trainer It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods which we can re-use here. Important: this class' setup can only work with a single gpu because it runs within the current pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher. Note: if any of the tests of this class get run there will be at least one gpu occupied by them until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels won't be released until this pytest worker exits. This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This is not a bug. """ # --- These tests are enough to run on one of zero stages --- # def test_hf_ds_config_mismatch(self): ds_config = self.get_config_dict(ZERO2) # Purposefully configure these values to mismatch TrainingArguments values. # This currently doesn't cover all keys (but it could) per_device_train_batch_size = 2 ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2 ds_config["train_batch_size"] = 1000 gradient_accumulation_steps = 2 ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2 max_grad_norm = 1.0 ds_config["gradient_clipping"] = max_grad_norm + 0.1 adam_beta1, adam_beta2 = 0.9, 0.99 ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1] fp16 = True ds_config["fp16"]["enabled"] = not fp16 keys = [ "per_device_train_batch_size", "train_batch_size", "gradient_accumulation_steps", "max_grad_norm", "betas", "fp16", ] with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer( local_rank=0, fp16=fp16, deepspeed=ds_config, per_device_train_batch_size=per_device_train_batch_size, gradient_accumulation_steps=gradient_accumulation_steps, max_grad_norm=max_grad_norm, adam_beta1=adam_beta1, adam_beta2=adam_beta2, ) with self.assertRaises(Exception) as context: trainer.train() for key in keys: self.assertTrue( key in str(context.exception), f"{key} is not in the exception message:\n{context.exception}", ) # Test various combos # 1. DS scheduler + DS optimizer: this is already tested by most other tests # 2. HF scheduler + HF optimizer: # 3. DS scheduler + HF optimizer: # 4. 
HF scheduler + DS optimizer: def test_hf_scheduler_hf_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) def test_ds_scheduler_hf_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) def test_hf_scheduler_ds_optimizer(self): a = 0 with mockenv_context(**self.dist_env_1_gpu): ds_config_zero2_dict = self.get_config_dict(ZERO2) del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none" ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict) trainer.train() new_a = trainer.model.a.item() self.assertNotEqual(new_a, a) @require_deepspeed_aio def test_stage3_nvme_offload(self): with mockenv_context(**self.dist_env_1_gpu): # this actually doesn't have to be on NVMe, any storage will do since this test only # runs a simple check that we can use some directory as if it were NVMe nvme_path = self.get_auto_remove_tmp_dir() nvme_config = {"device": "nvme", "nvme_path": nvme_path} ds_config_zero3_dict = self.get_config_dict(ZERO3) ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @require_optuna def test_hyperparameter_search(self): with mockenv_context(**self.dist_env_1_gpu): ds_config_zero3_dict = self.get_config_dict(ZERO3) # hyperparameter_search requires model_init() to recreate the model for each trial def model_init(): config = RegressionModelConfig(a=0, b=0, double_output=False) model = RegressionPreTrainedModel(config) return model trainer = get_regression_trainer( local_rank=0, fp16=True, model_init=model_init, deepspeed=ds_config_zero3_dict, ) n_trials = 3 with CaptureLogger(deepspeed_logger) as cl: with CaptureStd() as cs: trainer.hyperparameter_search(direction="maximize", n_trials=n_trials) self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") self.assertIn(f"Trial {n_trials-1} finished with value", cs.err, "expected hyperparameter_search output") self.assertIn("Best is trial", cs.err, "expected hyperparameter_search output") # --- These tests need to run on both zero 
stages --- # @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_hf_optimizer_with_offload(self, stage, dtype): # non-DS optimizers can be used with ZERO-offload (as long as they have both CPU and GPU implementation (except LAMB)) ds_config_dict = self.get_config_dict(stage) del ds_config_dict["optimizer"] # force default HF Trainer optimizer # force cpu offload ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu" ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam with mockenv_context(**self.dist_env_1_gpu): kwargs = {"local_rank": 0, "deepspeed": ds_config_dict} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_fake_notebook_no_launcher(self, stage, dtype): # this setup emulates a notebook where a launcher needs to be emulated by hand # note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture # DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if # it's run not as a first test as `sys.stdout` will no longer be the same. So we either have # to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger. with mockenv_context(**self.dist_env_1_gpu): kwargs = {"local_rank": 0, "deepspeed": self.get_config_dict(stage)} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) with CaptureLogger(deepspeed_logger) as cl: trainer.train() self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_early_get_last_lr(self, stage, dtype): # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may # not run for the first few dozen steps while loss scale is too large, and thus during # that time `get_last_lr` will fail if called during that warm up stage, # # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step. with mockenv_context(**self.dist_env_1_gpu): a = b = 0.0 kwargs = { "a": a, "b": b, "local_rank": 0, "train_len": 8, "deepspeed": self.get_config_dict(stage), "per_device_train_batch_size": 8, "logging_steps": 1, } kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) trainer.train() post_train_a = trainer.model.a.item() # XXX: for some reason the following check fails with zero3/fp16 and any/bf16 - not a # broken but a different qualitative outcome - as if optimizer did run # oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere # print(trainer.model.a.item()) # print(trainer.model.b.item()) # need to investigate at some point if (stage == ZERO3 and dtype == FP16) or (dtype == BF16): return # it's enough that train didn't fail for this test, but we must check that # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing) self.assertEqual(post_train_a, a) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_gradient_accumulation(self, stage, dtype): # this test measures that we get identical weights and similar loss with: # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1 # 2. 
per_device_train_batch_size=4, gradient_accumulation_steps=2 # since the 2nd should produce the effective batch of 1st, with the same results # # I can get an identical loss for a small train_len=32, plus the power of the initial # dynamic loss scale value set to: # "fp16.initial_scale_power": 1 # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file # but for some reason going to train_len=64 the weights, weights start to mismatch with this setup. # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical train_len = 64 a = b = 0.0 kwargs = { "a": a, "b": b, "local_rank": 0, "train_len": train_len, "deepspeed": self.get_config_dict(stage), } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): no_grad_accum_trainer = get_regression_trainer( **kwargs, per_device_train_batch_size=16, gradient_accumulation_steps=1, ) no_grad_accum_result = no_grad_accum_trainer.train() no_grad_accum_loss = no_grad_accum_result.training_loss no_grad_accum_a = no_grad_accum_trainer.model.a.item() no_grad_accum_b = no_grad_accum_trainer.model.b.item() # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger self.assertNotEqual(no_grad_accum_a, a) with mockenv_context(**self.dist_env_1_gpu): yes_grad_accum_trainer = get_regression_trainer( **kwargs, per_device_train_batch_size=4, gradient_accumulation_steps=4, ) yes_grad_accum_result = yes_grad_accum_trainer.train() yes_grad_accum_loss = yes_grad_accum_result.training_loss yes_grad_accum_a = yes_grad_accum_trainer.model.a.item() yes_grad_accum_b = yes_grad_accum_trainer.model.b.item() self.assertNotEqual(yes_grad_accum_a, a) # training with half the batch size but accumulation steps as 2 should give the same # weights, but sometimes get a slight difference still of 1e-6 self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5) self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5) # Relative difference. 
See the note above how to get identical loss on a small bs self.assertTrue((no_grad_accum_loss - yes_grad_accum_loss) / (no_grad_accum_loss + 1e-15) <= 1e-3) def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtype): # adapted from TrainerIntegrationCommon.check_saved_checkpoints file_list = [SAFE_WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"] if stage == ZERO2: ds_file_list = ["mp_rank_00_model_states.pt"] elif stage == ZERO3: ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"] else: raise ValueError(f"unknown stage {stage}") if dtype == "bf16": ds_file_list.append("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found") # common files for filename in file_list: path = os.path.join(checkpoint, filename) self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found") # ds files ds_path = os.path.join(checkpoint, f"global_step{step}") for filename in ds_file_list: # filename = os.path.join(path, filename) # print(filename) path = os.path.join(ds_path, filename) self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_save_checkpoints(self, stage, dtype): # adapted from TrainerIntegrationTest.test_save_checkpoints freq = 5 output_dir = self.get_auto_remove_tmp_dir() ds_config_dict = self.get_config_dict(stage) if dtype == FP16: ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step # XXX: if stage == ZERO3: ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True # save checkpoints with mockenv_context(**self.dist_env_1_gpu): kwargs = { "output_dir": output_dir, "save_steps": freq, "deepspeed": ds_config_dict, } kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) trainer.train() total = int(self.n_epochs * 64 / self.batch_size) self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage, dtype) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_can_resume_training_errors(self, stage, dtype): with mockenv_context(**self.dist_env_1_gpu): ds_config_dict = self.get_config_dict(stage) output_dir = self.get_auto_remove_tmp_dir() kwargs = {"output_dir": output_dir, "deepspeed": ds_config_dict} kwargs[dtype] = True trainer = get_regression_trainer(**kwargs) # 1. fail to find any checkpoint - due a fresh output_dir with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue( "No valid checkpoint found in output directory" in str(context.exception), f"got exception: {context.exception}", ) # 2. 
fail to find a bogus checkpoint with self.assertRaises(Exception) as context: checkpoint = os.path.join(output_dir, "checkpoint-5") trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") self.assertTrue( "Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}" ) @parameterized.expand(params_with_optims_and_schedulers, name_func=parameterized_custom_name_func) def test_can_resume_training_normal(self, stage, dtype, optim, scheduler): # adapted from TrainerIntegrationTest.test_can_resume_training # test normal resume for each stage separately, error-handling is tested in a different test # ToDo: Currently, hf_optim + hf_scheduler resumes with the correct states and # also has same losses for few steps but then slowly diverges. Need to figure it out. if optim == HF_OPTIM and scheduler == HF_SCHEDULER: return output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) ds_config_dict = self.get_config_dict(stage) if dtype == FP16: ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step # XXX: if stage == ZERO3: ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True if optim == HF_OPTIM: del ds_config_dict["optimizer"] if scheduler == HF_SCHEDULER: del ds_config_dict["scheduler"] kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "deepspeed": ds_config_dict, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(output_dir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(output_dir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Finally, should be able to resume with the same trainer/same deepspeed engine instance # XXX: but currently this not possible due DS bug: https://github.com/microsoft/DeepSpeed/issues/1612 # trainer.train(resume_from_checkpoint=checkpoint) # a workaround needs to be used that re-creates the deepspeed engine @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_load_state_dict_from_zero_checkpoint(self, stage, dtype): # test that we can load fp32 weights directly from the zero checkpoint into the current model output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False) ds_config_dict = self.get_config_dict(stage) kwargs = { "output_dir": output_dir, "train_len": 4, "per_device_train_batch_size": 4, "num_train_epochs": 1, "save_strategy": "steps", "save_steps": 1, "learning_rate": 0.1, "deepspeed": ds_config_dict, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() 
state = dataclasses.asdict(trainer.state) checkpoint_dir = get_last_checkpoint(output_dir) model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) (a1, b1) = model.a.item(), model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_config_object(self): # test that we can switch from zero2 to zero3 in the same process for example # test is_zero, etc. output_dir = self.get_auto_remove_tmp_dir() kwargs = {"output_dir": output_dir, "train_len": 8, "fp16": True} ds_config_zero3_dict = self.get_config_dict(ZERO3) ds_config_zero2_dict = self.get_config_dict(ZERO2) with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs) self.assertTrue(is_deepspeed_zero3_enabled()) # test we can repeat that and with train this time trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs) trainer.train() self.assertTrue(is_deepspeed_zero3_enabled()) # test zero3 is disabled trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs) self.assertFalse(is_deepspeed_zero3_enabled()) # check config obj config = deepspeed_config() self.assertTrue(bool(config), "Deepspeed config should be accessible") # with accelerate integration below line is additionally required for this test to pass trainer.accelerator.state._reset_state() del trainer # now weakref should gc the global and we shouldn't get anything here config = deepspeed_config() self.assertFalse(is_deepspeed_zero3_enabled()) self.assertFalse(bool(config), "Deepspeed config should not be accessible") @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_load_best_model(self, stage, dtype): # Test that forced deepspeed reinit doesn't break the model. 
the forced re-init after # loading the best model in Trainer is there to workaround this bug in Deepspeed # https://github.com/microsoft/DeepSpeed/issues/1612 # # The test is derived from a repro script submitted in this Issue: # https://github.com/huggingface/transformers/issues/17114 # # One additional feature of this test is that we use a non-AdamW optimizer to test that # deepspeed doesn't fallback to AdamW, which would prevent the optimizer states from loading # correctly from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer # noqa output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False) ds_config_dict = self.get_config_dict(stage) del ds_config_dict["optimizer"] # will use HF Trainer optimizer del ds_config_dict["scheduler"] # will use HF Trainer scheduler ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam # must use this setting to get the reload path exercised ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True with mockenv_context(**self.dist_env_1_gpu): args_dict = { "per_device_train_batch_size": 1, "per_device_eval_batch_size": 1, "gradient_accumulation_steps": 1, "learning_rate": 1e-4, "num_train_epochs": 1, "do_train": True, "do_eval": True, "optim": "adafactor", "evaluation_strategy": "steps", "eval_steps": 1, "save_strategy": "steps", "save_steps": 1, "load_best_model_at_end": True, "max_steps": 1, "deepspeed": ds_config_dict, "report_to": "none", } training_args = TrainingArguments(output_dir, **args_dict) tokenizer = T5Tokenizer.from_pretrained(T5_TINY) model = T5ForConditionalGeneration.from_pretrained(T5_TINY) def _add_eos_to_examples(example): example["input_text"] = f"question: {example['question']} context: {example['context']}" example["target_text"] = example["answers"]["text"][0] if len(example["answers"]["text"]) > 0 else "" return example def _convert_to_features(example_batch): input_encodings = tokenizer.batch_encode_plus( example_batch["input_text"], pad_to_max_length=True, max_length=512, truncation=True ) target_encodings = tokenizer.batch_encode_plus( example_batch["target_text"], pad_to_max_length=True, max_length=16, truncation=True ) encodings = { "input_ids": input_encodings["input_ids"], "attention_mask": input_encodings["attention_mask"], "labels": target_encodings["input_ids"], } return encodings def get_dataset(): data_file = str(self.tests_dir / "fixtures/tests_samples/SQUAD/sample.json") data_files = {"train": data_file, "validation": data_file} raw_datasets = datasets.load_dataset("json", data_files=data_files, field="data") train_dataset = raw_datasets["train"].map(_add_eos_to_examples).map(_convert_to_features, batched=True) valid_dataset = deepcopy(train_dataset) return train_dataset, valid_dataset train_dataset, eval_dataset = get_dataset() trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, ) trainer.train() # crash 1 was here trainer.evaluate() # crash 2 was here @slow @require_deepspeed @require_torch_accelerator class TestDeepSpeedWithLauncher(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" # Tests to devise # # # 1. 
predict_with_generate on multigpu - need to figure out how to give input sequences so that # the 2 gpus will generate prediction sequences that aren't of the same length - this is because # we had to code a special feature to sync the gpus when the predicted sequences aren't of the # same length. In general this will tested as a side-effect through a variety of other tests - # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as # long as we have a few full tests running on zero3 + predict_with_generate this should be # mostly covered. # # but there are 5 variations on beam search in `generate`- with identical code branched with `if # synced_gpus` # # 2. most tests should probably be run on both: zero2 and zero3 configs # @parameterized.expand(params, name_func=parameterized_custom_name_func) @require_torch_multi_accelerator def test_basic_distributed(self, stage, dtype): self.run_and_check(stage=stage, dtype=dtype, distributed=True) def test_do_eval_no_train(self): # testing only zero3 since zero2 makes no sense with inference self.run_and_check( stage=ZERO3, dtype=FP16, eval_steps=1, distributed=False, do_train=False, do_eval=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_fp32_non_distributed(self, stage, dtype): # real model needs too much GPU memory under stage2+fp32, so using tiny random model here - # therefore no quality checks, just basic completion checks are done self.run_and_check( stage=stage, dtype=dtype, model_name=T5_TINY, distributed=False, do_train=True, do_eval=True, quality_checks=False, fp32=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) @require_torch_multi_accelerator def test_fp32_distributed(self, stage, dtype): # real model needs too much GPU memory under stage2+fp32, so using tiny random model here - # therefore no quality checks, just basic completion checks are done self.run_and_check( stage=stage, dtype=dtype, model_name=T5_TINY, distributed=True, do_train=True, do_eval=True, quality_checks=False, fp32=True, ) @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_resume_train_not_from_ds_checkpoint(self, stage, dtype): # do normal training and then resume not from the deepspeed checkpoint but explicitly from # the saved model dir do_train = True do_eval = False kwargs = { "stage": stage, "dtype": dtype, "eval_steps": 1, "distributed": True, "do_train": do_train, "do_eval": do_eval, } # 1. normal training output_dir = self.run_and_check(**kwargs) # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir # - i.e. 
the same path the model was saved to in step 1 output_dir = self.run_trainer(**kwargs, model_name=output_dir) self.do_checks(output_dir, do_train=do_train, do_eval=do_eval) @parameterized.expand(["bf16", "fp16", "fp32"]) @require_torch_multi_accelerator def test_inference(self, dtype): if dtype == "bf16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest("test requires bfloat16 hardware support") # this is just inference, so no optimizer should be loaded # it only works for z3 (makes no sense with z1-z2) fp32 = True if dtype == "fp32" else False self.run_and_check( stage=ZERO3, dtype=FP16, model_name=T5_TINY, distributed=True, do_train=False, do_eval=True, quality_checks=False, fp32=fp32, ) def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True): if do_train: train_metrics = load_json(os.path.join(output_dir, "train_results.json")) self.assertIn("train_samples_per_second", train_metrics) if quality_checks: self.assertGreater(train_metrics["train_samples_per_second"], 0.5) if do_eval: eval_metrics = load_json(os.path.join(output_dir, "eval_results.json")) self.assertIn("eval_bleu", eval_metrics) if quality_checks: self.assertGreater(eval_metrics["eval_bleu"], 1) # XXX: need to do better validation beyond just that the run was successful def run_and_check( self, stage, dtype, model_name: str = T5_SMALL, eval_steps: int = 10, distributed: bool = True, do_train: bool = True, do_eval: bool = True, quality_checks: bool = True, fp32: bool = False, extra_args_str: str = None, remove_args_str: str = None, ): # we are doing quality testing so using a small real model output_dir = self.run_trainer( stage=stage, dtype=dtype, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, do_train=do_train, do_eval=do_eval, distributed=distributed, fp32=fp32, extra_args_str=extra_args_str, remove_args_str=remove_args_str, ) self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks) return output_dir def run_trainer( self, stage: str, dtype: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, do_train: bool = False, do_eval: bool = True, distributed: bool = True, fp32: bool = False, extra_args_str: str = None, remove_args_str: str = None, ): max_len = 32 data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --output_dir {output_dir} --overwrite_output_dir --max_source_length {max_len} --max_target_length {max_len} --val_max_target_length {max_len} --warmup_steps 8 --predict_with_generate --save_steps 0 --eval_steps {eval_steps} --group_by_length --label_smoothing_factor 0.1 --source_lang en --target_lang ro --report_to none """.split() args.extend(["--source_prefix", '"translate English to Romanian: "']) if not fp32: args.extend([f"--{dtype}"]) actions = 0 if do_train: actions += 1 args.extend( f""" --do_train --num_train_epochs {str(num_train_epochs)} --max_train_samples 16 --per_device_train_batch_size 2 --learning_rate 3e-3 """.split() ) if do_eval: actions += 1 args.extend( """ --do_eval --max_eval_samples 16 --per_device_eval_batch_size 2 """.split() ) assert actions > 0, "need at least do_train or do_eval for the test to run" if extra_args_str is not None: args.extend(extra_args_str.split()) # currently only works for bool args if remove_args_str is not None: remove_args = remove_args_str.split() args = [x for x in args 
if x not in remove_args] ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"] launcher = get_launcher(distributed) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) return output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_clm(self, stage, dtype): # this test exercises model.resize_token_embeddings() which requires param gathering outside # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py` data_dir = self.tests_dir / "fixtures" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_name_or_path {GPT2_TINY} --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} --overwrite_output_dir --do_train --do_eval --max_train_samples 16 --max_eval_samples 16 --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --num_train_epochs 1 --warmup_steps 8 --block_size 64 --report_to none """.split() args.extend([f"--{dtype}"]) ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"] launcher = get_launcher(distributed=True) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) def test_clm_from_config_zero3_fp16(self): # this test exercises AutoModel.from_config(config) - to ensure zero.Init is called data_dir = self.tests_dir / "fixtures" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_type gpt2 --tokenizer_name {GPT2_TINY} --train_file {data_dir}/sample_text.txt --validation_file {data_dir}/sample_text.txt --output_dir {output_dir} --overwrite_output_dir --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --warmup_steps 8 --block_size 8 --fp16 --report_to none """.split() ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split() script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"] launcher = get_launcher(distributed=True) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die with CaptureStderr() as cs: execute_subprocess_async(cmd, env=self.get_env()) self.assertIn("Detected DeepSpeed ZeRO-3", cs.err)
0
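A condensed sketch of the non-Trainer ZeRO-3 pattern that CoreIntegrationDeepSpeed exercises above, assuming deepspeed is installed: keeping a live HfDeepSpeedConfig around before from_pretrained() is what makes transformers initialize the model under deepspeed.zero.Init.

# Sketch of the ZeRO-3 detection pattern from test_init_zero3_fp16.
from transformers import AutoModel
from transformers.integrations.deepspeed import HfDeepSpeedConfig, is_deepspeed_zero3_enabled

ds_config = {"train_batch_size": 1, "zero_optimization": {"stage": 3}}
dschf = HfDeepSpeedConfig(ds_config)  # keep this object alive: the global holds only a weakref
assert is_deepspeed_zero3_enabled()

# from_pretrained now logs "Detected DeepSpeed ZeRO-3" and partitions weights on init
model = AutoModel.from_pretrained("patrickvonplaten/t5-tiny-random")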
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/deepspeed/test_model_zoo.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import subprocess from os.path import dirname from parameterized import parameterized from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_gpu_count, get_tests_dir, require_deepspeed, require_torch_gpu, slow, ) from transformers.trainer_utils import set_seed if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer, ) set_seed(42) FIXTURE_DIRECTORY = get_tests_dir("fixtures") ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir())) DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__)) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "t5-small" # *** Working Models *** ALBERT_TINY = "hf-internal-testing/tiny-albert" BART_TINY = "sshleifer/bart-tiny-random" BERT_TINY = "hf-internal-testing/tiny-bert" BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus" BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird" BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot" BLOOM_TINY = "bigscience/bigscience-small-testing" DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta" DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2" DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased" ELECTRA_TINY = "hf-internal-testing/tiny-electra" FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert" FSMT_TINY = "stas/tiny-wmt19-en-de" FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel" GPT2_TINY = "sshleifer/tiny-gpt2" GPTJ_TINY = "hf-internal-testing/tiny-random-gptj" GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo" LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm" LED_TINY = "hf-internal-testing/tiny-random-led" LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer" M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable MARIAN_TINY = "sshleifer/tiny-marian-en-de" MBART_TINY = "sshleifer/tiny-mbart" MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert" MPNET_TINY = "hf-internal-testing/tiny-random-mpnet" PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random" PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet" ROBERTA_TINY = "sshleifer/tiny-distilroberta-base" SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert" T5_TINY = "patrickvonplaten/t5-tiny-random" T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1" VIT_TINY = "hf-internal-testing/tiny-random-vit" XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta" XLNET_TINY = "sshleifer/tiny-xlnet-base-cased" # *** To Fix *** # *** tiny model issues *** # missing model files: MT5_TINY = "hf-internal-testing/tiny-random-mt5" CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert" OPENAI_GPT_TINY = "hf-internal-testing/tiny-random-openai-gpt" # missing tokenizer files 
CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert" LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2" HUBERT_TINY = "hf-internal-testing/tiny-random-hubert" # issues with tokenizer CTRL_TINY = "hf-internal-testing/tiny-random-ctrl" TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as ctrl # other issues with tiny models IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas # *** Lacking official examples to test with *** # or not working with examples DPR_TINY = "hf-internal-testing/tiny-random-dpr" # - "dpr" examples/research_projects/rag-end2end-retriever/ RAG_TINY = "hf-internal-testing/tiny-random-rag" # - "rag" research_projects LUKE_TINY = "" # - "luke" Entities classes - no plan to make such example LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert" # - "lxmert" doesn't work with run_qa.py CLIP_TINY = "hf-internal-testing/tiny-random-clip" # - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text" # - "speech_to_text", nothing under pytorch examples # *** Reactive mode *** # models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem TAPAS_TINY = "hf-internal-testing/tiny-random-tapas" # additional notes on tapas # 1. "Table must be of type pd.DataFrame" failure # TODO: new models to add: # def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, get_gpu_count()) if distributed else 1 master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() def make_task_cmds(): data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples" data_dir_wmt = f"{data_dir_samples}/wmt_en_ro" data_dir_xsum = f"{data_dir_samples}/xsum" args_main = """ --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) # but need a tiny model for each # # should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc. 
tasks2models = { "trans": [ "bart", "fsmt", "m2m_100", "marian", "mbart", "t5", "t5_v1", # "mt5", missing model files ], "sum": [ "pegasus", ], "clm": [ "big_bird", "bigbird_pegasus", "blenderbot", "bloom", "gpt2", "gpt_neo", "gptj", "xlm-roberta", "prophetnet", # "camembert", missing model files ], "mlm": [ "albert", "deberta", "deberta-v2", "distilbert", "electra", "flaubert", "funnel", "layoutlm", # "reformer", # multiple issues with either mlm/qa/clas ], "qa": [ "led", "longformer", "mobilebert", "mpnet", "roberta", "squeezebert", # "convbert", # missing tokenizer files # "layoutlmv2", missing model files ], "clas": [ "bert", "xlnet", # "hubert", # missing tokenizer files # "ibert", # multiple issues with either mlm/qa/clas # "transfo-xl", # tokenizer issues as ctrl # "ctrl", # tokenizer issues # "openai-gpt", missing model files # "tapas", multiple issues ], "img_clas": [ "vit", ], } scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch" tasks = { "trans": f""" {scripts_dir}/translation/run_translation.py --train_file {data_dir_wmt}/train.json --source_lang en --target_lang ro """, "sum": f""" {scripts_dir}/summarization/run_summarization.py --train_file {data_dir_xsum}/sample.json --max_source_length 12 --max_target_length 12 --lang en """, "clm": f""" {scripts_dir}/language-modeling/run_clm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt --block_size 8 """, "mlm": f""" {scripts_dir}/language-modeling/run_mlm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt """, "qa": f""" {scripts_dir}/question-answering/run_qa.py --train_file {data_dir_samples}/SQUAD/sample.json """, "clas": f""" {scripts_dir}/text-classification/run_glue.py --train_file {data_dir_samples}/MRPC/train.csv --max_seq_length 12 --task_name MRPC """, "img_clas": f""" {scripts_dir}/image-classification/run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --remove_unused_columns False --max_steps 10 --image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json """, } launcher = get_launcher(distributed=True) cmds = {} for task, args in tasks.items(): args = args.split() for model in tasks2models[task]: model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"] args_model = f"--model_name_or_path {model_name}".split() cmds[f"{task}_{model}"] = launcher + args + args_model + args_main # # generation special case # if task == "gen": # launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split() # args_model += f"--model_type {model}".split() # cmds[f"{task}_{model}"] = launcher + args + args_model # else: return cmds task_cmds = make_task_cmds() ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] # future preparation: # for now test just fp16, as these tests are quite slow # FP16 = "fp16" # BF16 = "bf16" # # dtypes = [FP16] # so just hardcoding --fp16 for now # if is_torch_bf16_gpu_available(): # dtypes += [BF16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, task_cmds.keys())) @slow @require_deepspeed @require_torch_gpu class TestDeepSpeedModelZoo(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" def get_task_cmd(self, task, stage): # return a ready to run 
train cmd if task not in task_cmds: raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}") cmd = task_cmds[task] args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() output_dir = self.get_auto_remove_tmp_dir() args_out = f"--output_dir {output_dir}".split() cmd += args_ds + args_out return cmd, output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_zero_to_fp32(self, stage, task): # testing the ability to do a run followed by recovery of full fp32 weights cmd, output_dir = self.get_task_cmd(task, stage) # 1. generate the checkpoint cmd += "--save_steps 1".split() # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die execute_subprocess_async(cmd, env=self.get_env()) # 2. test that the fp32 weights get reconsolidated chkpt_dir = f"{output_dir}/checkpoint-1" recovered_model_path = f"{chkpt_dir}/out.bin" cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}" # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die subprocess.check_call(cmd, shell=True) assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found" # possibly could also test that the resulting saved model is usable but given that we use # random models we won't know if it's any good
0
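For reference, the commands make_task_cmds() assembles above boil down to a deepspeed launcher invocation of one of the PyTorch example scripts. A sketch of the translation/tiny-T5 case with illustrative paths:

# Sketch: approximate shape of one generated command (translation task, tiny T5).
launcher = "deepspeed --num_nodes 1 --num_gpus 2 --master_port 10999".split()
args = (
    "examples/pytorch/translation/run_translation.py "
    "--train_file tests/fixtures/tests_samples/wmt_en_ro/train.json "
    "--source_lang en --target_lang ro "
    "--model_name_or_path patrickvonplaten/t5-tiny-random "
    "--do_train --max_train_samples 4 --per_device_train_batch_size 2 "
    "--num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir "
    "--deepspeed tests/deepspeed/ds_config_zero3.json --output_dir /tmp/zero3_t5"
).split()
cmd = launcher + args  # executed with execute_subprocess_async(cmd, env=self.get_env())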
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/deepspeed/ds_config_zero2.json
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
0
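The ZeRO-2 file above is what TrainerIntegrationDeepSpeedWithCustomConfig loads and copies per test; several tests then delete sections to hand control back to the HF Trainer. A sketch of that manipulation, assuming the JSON is saved locally as ds_config_zero2.json:

# Sketch: load the ZeRO-2 config and drop the DS optimizer/scheduler so the
# HF Trainer defaults are used instead (mirrors test_hf_scheduler_hf_optimizer).
import json
from copy import deepcopy

with open("ds_config_zero2.json", encoding="utf-8") as f:
    base_config = json.load(f)

ds_config = deepcopy(base_config)          # never mutate the original dict
del ds_config["optimizer"]                 # force the default HF Trainer optimizer
del ds_config["scheduler"]                 # force the default HF Trainer scheduler
ds_config["zero_optimization"]["offload_optimizer"]["device"] = "none"
# pass the dict via TrainingArguments(..., deepspeed=ds_config)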
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/bettertransformer/test_integration.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class BetterTransformerIntegrationTest(unittest.TestCase): # refer to the full test suite in Optimum library: # https://github.com/huggingface/optimum/tree/main/tests/bettertransformer def test_transform_and_reverse(self): r""" Classic tests to simply check if the conversion has been successfull. """ model_id = "hf-internal-testing/tiny-random-t5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) inp = tokenizer("This is me", return_tensors="pt") model = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) output = model.generate(**inp) model = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()) ) output_from_pretrained = model_reloaded.generate(**inp) self.assertTrue(torch.allclose(output, output_from_pretrained)) def test_error_save_pretrained(self): r""" The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode. All should be good if the model is reversed. """ model_id = "hf-internal-testing/tiny-random-t5" model = AutoModelForSeq2SeqLM.from_pretrained(model_id) model = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError): model.save_pretrained(tmpdirname) model = model.reverse_bettertransformer() model.save_pretrained(tmpdirname)
0
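The integration test above amounts to the following user-facing flow; a sketch assuming optimum is installed, with the same tiny T5 checkpoint the test uses:

# Sketch: convert to BetterTransformer, generate, then reverse before saving,
# since save_pretrained() raises while the model is still converted.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to_bettertransformer()

output = model.generate(**tokenizer("This is me", return_tensors="pt"))

model = model.reverse_bettertransformer()  # required before save_pretrained
model.save_pretrained("tiny-t5-reversed")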
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_video_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class VideoClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): example_video_filepath = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def run_pipeline_test(self, video_classifier, examples): for example in examples: outputs = video_classifier(example) self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" small_feature_extractor = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) video_classifier = pipeline( "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 ) video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset") outputs = video_classifier(video_file_path, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ) outputs = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ], ) @require_tf def test_small_model_tf(self): pass
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class ZeroShotAudioClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLAP would be there for now. # model_mapping = {CLAPConfig: CLAPModel} @require_torch def test_small_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" ) dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], ) @unittest.skip("No models are available in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", ) # This is an audio of a dog dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ) output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) output = audio_classifier( [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) @unittest.skip("No models are available in TF") def test_large_model_tf(self): pass
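A minimal sketch of zero-shot audio classification with CLAP, mirroring the slow test above; it assumes `datasets` is installed and uses the same ESC-50 sample. The candidate labels are free-form text and are illustrative here.

# Illustrative sketch, not from the test suite.
from datasets import load_dataset
from transformers import pipeline

audio_classifier = pipeline(
    task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
)
dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]  # a dog sound, as in the test

# Labels are scored against the audio; the dog label should dominate.
print(audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"]))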
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_summarization.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, SummarizationPipeline, TFPreTrainedModel, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer) return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"] def run_pipeline_test(self, summarizer, _): model = summarizer.model outputs = summarizer("(CNN)The Palestinian Authority officially became") self.assertEqual(outputs, [{"summary_text": ANY(str)}]) outputs = summarizer( "(CNN)The Palestinian Authority officially became ", num_beams=2, min_length=2, max_length=5, ) self.assertEqual(outputs, [{"summary_text": ANY(str)}]) # Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences. model_can_handle_longer_seq = [ "SwitchTransformersConfig", "T5Config", "LongT5Config", "LEDConfig", "PegasusXConfig", "FSMTConfig", "M2M100Config", "ProphetNetConfig", # positional embeddings up to a fixed maximum size (otherwise clamping the values) ] if model.config.__class__.__name__ not in model_can_handle_longer_seq: # Too long and exception is expected. # For TF models, if the weights are initialized in GPU context, we won't get expected index error from # the embedding layer. 
if not ( isinstance(model, TFPreTrainedModel) and len(summarizer.model.trainable_weights) > 0 and "GPU" in summarizer.model.trainable_weights[0].device ): with self.assertRaises(Exception): outputs = summarizer("This " * 1000) outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST) @require_torch def test_small_model_pt(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_tf def test_small_model_tf(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_torch @slow def test_integration_torch_summarization(self): summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." 
In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) expected_cnn_summary = ( " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives" " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States" " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move," " says governments seeking to penalize Palestine should end pressure ." ) result = summarizer(cnn_article) self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
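A minimal sketch of the summarization pipeline exercised above. Calling `pipeline(task="summarization")` with no model picks the library's default summarization checkpoint, as the slow integration test does; the generation parameters below are illustrative, not the test's values.

# Illustrative sketch, not from the test suite.
from transformers import pipeline
from transformers.tokenization_utils import TruncationStrategy

summarizer = pipeline(task="summarization")

article = (
    "(CNN)The Palestinian Authority officially became the 123rd member of the "
    "International Criminal Court on Wednesday, a step that gives the court "
    "jurisdiction over alleged crimes in Palestinian territories."
)
print(summarizer(article, num_beams=2, min_length=5, max_length=40)[0]["summary_text"])

# Most encoder-decoder models have a hard input-length limit; over-long inputs either
# raise or must be truncated explicitly, which is what run_pipeline_test checks.
summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST)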
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = ObjectDetectionPipeline(model=model, image_processor=processor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class ZeroShotClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): classifier = ZeroShotClassificationPipeline( model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def run_pipeline_test(self, classifier, _): outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics") self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # No kwarg outputs = classifier("Who are you voting for in 2020?", ["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health") self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"]) self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" ) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # https://github.com/huggingface/transformers/issues/13846 outputs = classifier(["I am happy"], ["positive", "negative"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(1) ], ) outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"]) 
self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(2) ], ) with self.assertRaises(ValueError): classifier("", candidate_labels="politics") with self.assertRaises(TypeError): classifier(None, candidate_labels="politics") with self.assertRaises(ValueError): classifier("Who are you voting for in 2020?", candidate_labels="") with self.assertRaises(TypeError): classifier("Who are you voting for in 2020?", candidate_labels=None) with self.assertRaises(ValueError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", ) with self.assertRaises(AttributeError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, ) self.run_entailment_id(classifier) def run_entailment_id(self, zero_shot_classifier: Pipeline): config = zero_shot_classifier.model.config original_label2id = config.label2id original_entailment = zero_shot_classifier.entailment_id config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id, -1) config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id, 2) zero_shot_classifier.model.config.label2id = original_label2id self.assertEqual(original_entailment, zero_shot_classifier.entailment_id) @require_torch def test_truncation(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" 
* 100, candidate_labels=["politics", "public health", "science"] ) @require_torch def test_small_model_pt(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @require_tf def test_small_model_tf(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @slow @require_torch def test_large_model_pt(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, ) @slow @require_tf def test_large_model_tf(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, )
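A minimal sketch of the zero-shot classification pipeline tested above, using the same roberta-large-mnli checkpoint as the slow test; the input sentence in the multi-label call is shortened for illustration.

# Illustrative sketch, not from the test suite.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")

single = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(single["labels"][0])  # highest-scoring label; scores sum to 1.0 by default

multi = classifier(
    "We propose a new network architecture based solely on attention mechanisms.",
    candidate_labels=["machine learning", "statistics", "translation", "vision"],
    hypothesis_template="This text is about {}",  # template used in the test above
    multi_label=True,  # labels are scored independently, so scores no longer sum to 1
)
print(list(zip(multi["labels"], multi["scores"])))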
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_audio_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class AudioClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor) # test with a raw waveform audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return audio_classifier, [audio2, audio] def run_pipeline_test(self, audio_classifier, examples): audio2, audio = examples output = audio_classifier(audio) # by default a model is initialized with num_labels=2 self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) output = audio_classifier(audio, top_k=1) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, ], ) self.run_torchaudio(audio_classifier) @require_torchaudio def run_torchaudio(self, audio_classifier): import datasets # test with a local file dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio = dataset[0]["audio"]["array"] output = audio_classifier(audio) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): model = "anton-l/wav2vec2-random-tiny-classifier" audio_classifier = pipeline("audio-classification", model=model) audio = np.ones((8000,)) output = audio_classifier(audio, top_k=4) EXPECTED_OUTPUT = [ {"score": 0.0842, "label": "no"}, {"score": 0.0838, "label": "up"}, {"score": 0.0837, "label": "go"}, {"score": 0.0834, "label": "right"}, ] EXPECTED_OUTPUT_PT_2 = [ {"score": 0.0845, "label": "stop"}, {"score": 0.0844, "label": "on"}, {"score": 0.0841, "label": "right"}, {"score": 0.0834, "label": "left"}, ] self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate} output = audio_classifier(audio_dict, top_k=4) self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) @require_torch @slow def test_large_model_pt(self): import datasets model = "superb/wav2vec2-base-superb-ks" audio_classifier = pipeline("audio-classification", model=model) dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test") audio = np.array(dataset[3]["speech"], dtype=np.float32) output = audio_classifier(audio, top_k=4) self.assertEqual( 
nested_simplify(output, decimals=3), [ {"score": 0.981, "label": "go"}, {"score": 0.007, "label": "up"}, {"score": 0.006, "label": "_unknown_"}, {"score": 0.001, "label": "down"}, ], ) @require_tf @unittest.skip("Audio classification is not implemented for TF") def test_small_model_tf(self): pass
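A minimal sketch of the audio-classification pipeline exercised above, feeding a raw waveform as a NumPy array; it uses the tiny checkpoint from the fast test, and the dummy waveform is illustrative.

# Illustrative sketch, not from the test suite.
import numpy as np
from transformers import pipeline

audio_classifier = pipeline(
    "audio-classification", model="anton-l/wav2vec2-random-tiny-classifier"
)
waveform = np.ones((8000,), dtype=np.float32)  # dummy audio, 0.5 s at 16 kHz
print(audio_classifier(waveform, top_k=4))

# A dict input lets the sampling rate be passed explicitly, as the fast test does.
sample = {"array": waveform, "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
print(audio_classifier(sample, top_k=4))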
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_or_tf, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def test_small_model_pt(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], ) outputs = text_generator(["This is a test", "This is a second test"]) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ], ) outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ) ## -- test tokenizer_kwargs test_str = "testing tokenizer kwargs. using truncation must result in a different generation." output_str, output_str_with_truncation = ( text_generator(test_str, do_sample=False, return_full_text=False)[0]["generated_text"], text_generator( test_str, do_sample=False, return_full_text=False, truncation=True, max_length=3, )[0]["generated_text"], ) assert output_str != output_str_with_truncation # results must be different because one hd truncation # -- what is the point of this test? 
padding is hardcoded False in the pipeline anyway text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id text_generator.tokenizer.pad_token = "<pad>" outputs = text_generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ], ) @require_tf def test_small_model_tf(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], ) outputs = text_generator(["This is a test", "This is a second test"], do_sample=False) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ], ) def get_test_pipeline(self, model, tokenizer, processor): text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer) return text_generator, ["This is a test", "Another test"] def test_stop_sequence_stopping_criteria(self): prompt = """Hello I believe in""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") output = text_generator(prompt) self.assertEqual( output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], ) output = text_generator(prompt, stop_sequence=" fe") self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}]) def run_pipeline_test(self, text_generator, _): model = text_generator.model tokenizer = text_generator.tokenizer outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator("This is a test", return_full_text=False) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False) outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) outputs = text_generator("This is a test", return_full_text=True) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) if text_generator.tokenizer.pad_token is not None: outputs = text_generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): 
outputs = text_generator("test", return_full_text=True, return_text=True) with self.assertRaises(ValueError): outputs = text_generator("test", return_full_text=True, return_tensors=True) with self.assertRaises(ValueError): outputs = text_generator("test", return_text=True, return_tensors=True) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): outputs = text_generator("") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) else: with self.assertRaises((ValueError, AssertionError)): outputs = text_generator("") if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [ "RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM", "FuyuForCausalLM", ] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)): text_generator("This is a test" * 500, max_new_tokens=20) outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20) # Hole strategy cannot work with self.assertRaises(ValueError): text_generator( "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, ) @require_torch @require_accelerate @require_torch_gpu def test_small_model_pt_bloom_accelerate(self): import torch # Classic `model_kwargs` pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, ) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto") self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) @require_torch @require_torch_accelerator def test_small_model_fp16(self): import torch pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", device=torch_device, torch_dtype=torch.float16, ) pipe("This is a test") @require_torch @require_accelerate @require_torch_accelerator def test_pipeline_accelerate_top_p(self): import torch pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16) pipe("This is a test", do_sample=True, top_p=0.5) def test_pipeline_length_setting_warning(self): prompt = """Hello world""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") if text_generator.model.framework == "tf": logger = logging.get_logger("transformers.generation.tf_utils") else: logger = logging.get_logger("transformers.generation.utils") logger_msg = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10, max_new_tokens=1) self.assertIn(logger_msg, cl.out) # The user only sets one -> no warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_new_tokens=1) self.assertNotIn(logger_msg, cl.out) with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10) self.assertNotIn(logger_msg, cl.out)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_image.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
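A minimal sketch of the image-to-image (super-resolution) pipeline tested above, with the same Swin2SR checkpoint and COCO image; it assumes `Pillow` is installed.

# Illustrative sketch, not from the test suite.
from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
outputs = upscaler(["http://images.cocodataset.org/val2017/000000039769.jpg"])

# Each output is a PIL.Image roughly 2x the input resolution; the test expects (1296, 976).
print(outputs[0].size)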
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_feature_extraction.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, is_tf_available, is_torch_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, 
-0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 
0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_torch def test_tokenization_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, 
-1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_tf def test_tokenization_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 
0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_shape(self, input_, shape=None): if shape is None: shape = [] if isinstance(input_, list): subshapes = [self.get_shape(in_, shape) for in_ in input_] if all(s == 0 for s in subshapes): shape.append(len(input_)) else: subshape = subshapes[0] shape = 
[len(input_), *subshape]
        elif isinstance(input_, float):
            return 0
        else:
            raise ValueError("We expect lists of floats, nothing else")
        return shape

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None:
            self.skipTest("No tokenizer")
            return
        elif (
            type(model.config) in FEATURE_EXTRACTOR_MAPPING
            or isinstance(model.config, LxmertConfig)
            or type(model.config) in IMAGE_PROCESSOR_MAPPING
        ):
            self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
            return
        elif model.config.is_encoder_decoder:
            self.skipTest(
                """encoder_decoder models are trickier for this pipeline.
                Do we want encoder + decoder inputs to get some features?
                Do we want encoder-only features? For now, ignore those.
                """
            )
            return
        feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor)
        return feature_extractor, ["This is a test", "This is another test"]

    def run_pipeline_test(self, feature_extractor, examples):
        outputs = feature_extractor("This is a test")
        shape = self.get_shape(outputs)
        self.assertEqual(shape[0], 1)

        # If we send too small input
        # there's a bug within FunnelModel (output with shape [1, 4, 2, 1] doesn't match the broadcast shape [1, 4, 2, 2])
        outputs = feature_extractor(["This is a test", "Another longer test"])
        shape = self.get_shape(outputs)
        self.assertEqual(shape[0], 2)

        outputs = feature_extractor("This is a test" * 100, truncation=True)
        shape = self.get_shape(outputs)
        self.assertEqual(shape[0], 1)
0
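The feature-extraction tests above only assert on output shapes and frozen expected values. As a quick orientation, here is a minimal usage sketch of the same pipeline API they exercise; the task name, `tokenize_kwargs`, and `return_tensors` usage are taken from the tests, while the printed shapes assume the tiny checkpoint's 32-dimensional hidden size and are illustrative rather than asserted.

import numpy as np
from transformers import pipeline

# Same tiny checkpoint the fast tests load; the pipeline returns one hidden-state vector per token.
extractor = pipeline(
    task="feature-extraction",
    model="hf-internal-testing/tiny-random-distilbert",
    framework="pt",
)

features = np.array(extractor("This is a test"))
print(features.shape)  # (1, num_tokens, 32) for this checkpoint

# Tokenizer arguments are forwarded through `tokenize_kwargs`, exactly as in the tests.
truncated = np.array(extractor("This is a test", tokenize_kwargs={"truncation": True, "max_length": 3}))
print(truncated.shape)  # (1, 3, 32)

# `return_tensors=True` keeps the framework tensor instead of nested Python lists.
tensor = extractor("This is a test", return_tensors=True)
print(type(tensor))  # <class 'torch.Tensor'> when framework="pt"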
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_table_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, AutoModelForTableQuestionAnswering, AutoTokenizer, TableQuestionAnsweringPipeline, TFAutoModelForTableQuestionAnswering, is_torch_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_pandas, require_tensorflow_probability, require_tf, require_torch, slow, ) if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False @is_pipeline_test class TQAPipelineTests(unittest.TestCase): # Putting it there for consistency, but TQA do not have fast tokenizer # which are needed to generate automatic tests model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING @require_tensorflow_probability @require_pandas @require_tf @require_torch def test_small_model_tf(self): model_id = "lysandre/tiny-tapas-random-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, 
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_small_model_pt(self): model_id = "lysandre/tiny-tapas-random-wtq" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the 
total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_slow_tokenizer_sqa_pt(self): model_id = "lysandre/tiny-tapas-random-sqa" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", 
"how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_tf @require_tensorflow_probability @require_pandas @require_torch def test_slow_tokenizer_sqa_tf(self): model_id = "lysandre/tiny-tapas-random-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], 
batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def 
test_integration_wtq_pt(self): table_querier = pipeline("table-question-answering") data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_wtq_tf(self): model_id = "google/tapas-base-finetuned-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer) data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def test_integration_sqa_pt(self): table_querier = pipeline( "table-question-answering", model="google/tapas-base-finetuned-sqa", tokenizer="google/tapas-base-finetuned-sqa", ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has 
George Clooney played in?", "How old is he?", "What's his date of birth?"]
        results = table_querier(data, queries, sequential=True)
        expected_results = [
            {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
            {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
            {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
        ]
        self.assertListEqual(results, expected_results)

    @slow
    @require_tensorflow_probability
    @require_pandas
    def test_integration_sqa_tf(self):
        model_id = "google/tapas-base-finetuned-sqa"
        model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        table_querier = pipeline(
            "table-question-answering",
            model=model,
            tokenizer=tokenizer,
        )
        data = {
            "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
            "Age": ["56", "45", "59"],
            "Number of movies": ["87", "53", "69"],
            "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        }
        queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
        results = table_querier(data, queries, sequential=True)
        expected_results = [
            {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
            {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
            {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
        ]
        self.assertListEqual(results, expected_results)

    @slow
    @require_torch
    def test_large_model_pt_tapex(self):
        model_id = "microsoft/tapex-large-finetuned-wtq"
        table_querier = pipeline(
            "table-question-answering",
            model=model_id,
        )
        data = {
            "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
            "Age": ["56", "45", "59"],
            "Number of movies": ["87", "53", "69"],
            "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        }
        queries = [
            "How many movies has George Clooney played in?",
            "How old is Mr Clooney ?",
            "What's the date of birth of Leonardo ?",
        ]
        results = table_querier(data, queries, sequential=True)
        expected_results = [
            {"answer": " 69"},
            {"answer": " 59"},
            {"answer": " 10 june 1996"},
        ]
        self.assertListEqual(results, expected_results)
0
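For context on the dict-shaped inputs and outputs asserted throughout the table-question-answering tests above, here is a minimal sketch. It assumes the WTQ-finetuned TAPAS checkpoint used by the slow integration tests is available, along with torch >= 1.12 and pandas; the output keys mirror the test assertions, but the concrete answer text is not guaranteed here.

from transformers import pipeline

# Checkpoint from the slow WTQ integration test; an aggregation-capable TAPAS model.
table_querier = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")

table = {
    "Repository": ["Transformers", "Datasets", "Tokenizers"],
    "Stars": ["36542", "4512", "3934"],
}
result = table_querier(table=table, query="How many stars does the transformers repository have?")

# Each result is a dict with the keys asserted in the tests above.
print(result["answer"], result["coordinates"], result["cells"], result["aggregator"])

# Passing a list of queries returns one dict per query.
results = table_querier(
    table=table,
    query=["What is the total amount of stars?", "What is the number of repositories?"],
)
print(len(results))  # 2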
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_text.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, processor): pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) 
self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? 
(1) lava (2) core (3) tunnel (4) ash cloud"
        outputs = pipe(image, prompt=prompt)
        self.assertEqual(outputs, [{"generated_text": "ash cloud"}])

        with self.assertRaises(ValueError):
            outputs = pipe([image, image], prompt=[prompt, prompt])

    @slow
    @require_tf
    def test_large_model_tf(self):
        pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"

        outputs = pipe(image)
        self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}])

        outputs = pipe([image, image])
        self.assertEqual(
            outputs,
            [
                [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
                [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
            ],
        )

    @slow
    @require_torch
    def test_conditional_generation_llava(self):
        pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf")

        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        prompt = (
            "<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:"
        )

        outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": "<image> \nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava"
                }
            ],
        )
0
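The image-to-text tests above cover single images, batches, `max_new_tokens`, and conditional prompts. The sketch below shows the same call patterns with the tiny fast-test checkpoint; its captions are random-weight noise, so only the output structure is meaningful, and the fixture path assumes the repository root as the working directory.

from transformers import pipeline

# Tiny checkpoint used by the fast tests; generations are not meaningful, only the structure is.
captioner = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2")

image = "./tests/fixtures/tests_samples/COCO/000000039769.png"

outputs = captioner(image, max_new_tokens=5)
print(outputs)  # [{"generated_text": "..."}]

# A list of images returns one list of dicts per image.
batched = captioner([image, image])
print(len(batched), batched[0][0]["generated_text"])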
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertConfig, QuestionAnsweringPipeline, ) from transformers.data.processors.squad import SquadExample from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, slow, ) from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class QAPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, LxmertConfig): # This is an bimodal model, we need to find a more consistent way # to switch on those models. return None, None question_answerer = QuestionAnsweringPipeline(model, tokenizer) examples = [ {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}, {"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."}, ] return question_answerer, examples def run_pipeline_test(self, question_answerer, _): outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", handle_impossible_answer=True, ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"], context="HuggingFace was founded in Paris.", ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) outputs = question_answerer( question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"], context=[ "HuggingFace is a startup based in New-York", "HuggingFace is a startup founded in Paris", ], ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) with self.assertRaises(ValueError): question_answerer(question="", context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question=None, context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context="") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context=None) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20 ) self.assertEqual( outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)] ) # Very long context require multiple features outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20 ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) # Using batch is OK if question_answerer.tokenizer.pad_token_id is None: question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id new_outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2 ) self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs)) @require_torch def test_small_model_pt(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_iterator(self): # https://github.com/huggingface/transformers/issues/18510 pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt") def data(): for i in range(10): yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."} for outputs in pipe(data()): self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_softmax_trick(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) real_postprocess = question_answerer.postprocess # Tweak start and stop to make sure we encounter the softmax logits # bug. def ensure_large_logits_postprocess( model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): for output in model_outputs: output["start"] = output["start"] * 1e6 output["end"] = output["end"] * 1e6 return real_postprocess( model_outputs, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) question_answerer.postprocess = ensure_large_logits_postprocess outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_small_model_japanese(self): question_answerer = pipeline( "question-answering", model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head", ) output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip # Wrong answer, the whole text is identified as one "word" since the tokenizer does not include # a pretokenizer self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip # Disable word alignment output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip self.assertEqual( nested_simplify(output), {"score": 1.0, "start": 15, "end": 18, "answer": "教科書"}, ) @slow @require_torch def test_small_model_long_context_cls_slow(self): question_answerer = pipeline( "question-answering", model="deepset/roberta-base-squad2", handle_impossible_answer=True, max_seq_length=512, ) outputs = question_answerer( question="What country is Paris the capital of?", context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. 
As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""", ) self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""}) @require_tf def test_small_model_tf(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.011, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_large_model_pt(self): question_answerer = pipeline( "question-answering", ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @slow @require_torch def test_large_model_issue(self): qa_pipeline = pipeline( "question-answering", model="mrm8488/bert-multi-cased-finetuned-xquadv1", ) outputs = qa_pipeline( { "context": ( "Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's" " order from August this year that had remanded him in police custody for a week in a multi-crore" " loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud" " case and some related matters being probed by the CBI and Enforcement Directorate. A single" " bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on" " October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special" " court's order permitting the CBI's request for police custody on August 14 was illegal and in" " breach of the due process of law. Therefore, his police custody and subsequent judicial custody" " in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special" " court's order dated August 14. As per his plea, in August this year, the CBI had moved two" " applications before the special court, one seeking permission to arrest Kapoor, who was already" " in judicial custody at the time in another case, and the other, seeking his police custody." " While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the" " central agency's plea for his custody. Kapoor, however, said in his plea that before filing an" " application for his arrest, the CBI had not followed the process of issuing him a notice under" " Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken" " prior sanction as mandated under section 17 A of the Prevention of Corruption Act for" " prosecuting him. The special court, however, had said in its order at the time that as Kapoor" " was already in judicial custody in another case and was not a free man the procedure mandated" " under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of" " appearance was concerned. ADVERTISING It had also said that case records showed that the" " investigating officer had taken an approval from a managing director of Yes Bank before" " beginning the proceedings against Kapoor and such a permission was a valid sanction. However," " Kapoor in his plea said that the above order was bad in law and sought that it be quashed and" " set aside. The law mandated that if initial action was not in consonance with legal procedures," " then all subsequent actions must be held as illegal, he said, urging the High Court to declare" " the CBI remand and custody and all subsequent proceedings including the further custody as" " illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee" " Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee" " has stated that she is a resident of the United Kingdom and is unable to travel to India owing" " to restrictions imposed due to the COVID-19 pandemic. 
According to the CBI, in the present case," " Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused" " Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was" " not eligible for the same" ), "question": "Is this person invovled in fraud?", } ) self.assertEqual( nested_simplify(outputs), {"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261}, ) @slow @require_torch def test_large_model_course(self): question_answerer = pipeline("question-answering") long_context = """ 🤗 Transformers: State of the Art NLP 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question = "Which deep learning libraries back 🤗 Transformers?" outputs = question_answerer(question=question, context=long_context) self.assertEqual( nested_simplify(outputs), {"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892}, ) @slow @require_tf def test_large_model_tf(self): question_answerer = pipeline("question-answering", framework="tf") outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @require_torch_or_tf class QuestionAnsweringArgumentHandlerTests(unittest.TestCase): def test_argument_handler(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" normalized = qa(Q, C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=[Q, Q], context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa({"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X=[{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(data={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" with self.assertRaises(KeyError): qa({"context": C}) with self.assertRaises(KeyError): qa({"question": Q}) with self.assertRaises(KeyError): qa([{"context": C}]) with self.assertRaises(ValueError): qa(None, C) with self.assertRaises(ValueError): qa("", C) with self.assertRaises(ValueError): qa(Q, None) with self.assertRaises(ValueError): qa(Q, "") with self.assertRaises(ValueError): qa(question=None, context=C) with self.assertRaises(ValueError): qa(question="", context=C) with self.assertRaises(ValueError): qa(question=Q, context=None) with self.assertRaises(ValueError): qa(question=Q, context="") with self.assertRaises(ValueError): qa({"question": None, "context": C}) with self.assertRaises(ValueError): qa({"question": "", "context": C}) with self.assertRaises(ValueError): qa({"question": Q, "context": None}) with self.assertRaises(ValueError): qa({"question": Q, "context": ""}) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": None, "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": "", "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": None}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": ""}]) with self.assertRaises(ValueError): qa(question={"This": "Is weird"}, context="This is a context") with self.assertRaises(ValueError): qa(question=[Q, Q], context=[C, C, C]) with self.assertRaises(ValueError): qa(question=[Q, Q, Q], context=[C, C]) def test_argument_handler_old_format(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" C = "HuggingFace was founded in Paris" # Backward compatibility for this normalized = qa(question=[Q, Q], context=[C, C]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling_odd(self): qa = QuestionAnsweringArgumentHandler() with self.assertRaises(ValueError): qa(None) with self.assertRaises(ValueError): qa(Y=None) with self.assertRaises(ValueError): qa(1)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline # noqa E402 logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. 
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): # Wrong framework get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) # When using multiple workers on streamable data it should still work # This will force using `num_workers=1` with a warning for now. 
results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) # Used to throw an error because `hidden_states` are a tuple of tensors # instead of the expected tensor. outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 
1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, 
"is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) # is_false Across batch dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): # To avoid regressing, pipeline used to accept device=-1 classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): import tensorflow as tf from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() def check_default_pipeline(self, task, framework, 
set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] # test to compare pipeline to manually loading the respective model model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: # task has no default logger.debug(f"{task} in {framework} has no default") return # by default use first class auto_model_cls = relevant_auto_classes[0] # retrieve correct model ids if task == "translation": # special case for translation pipeline which has multiple languages model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: # normal case - non-translation pipeline model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] # check for equality for model_id, revision, task in zip(model_ids, revisions, tasks): # load default model try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: # first auto class is possible not compatible with model, go to next model class auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) # load default pipeline set_seed_fn() default_pipeline = pipeline(task, framework=framework) # compare pipeline model with default model models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" # Get the original task, so we can restore it at the end. 
# (otherwise the subsequential tests in `TextClassificationPipelineTests` will fail) _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: # restore PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): # Make sure we have cached the pipeline. 
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") # For some reason scoping doesn't work if not using `self.` self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-pipeline", token=self._token) repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-pipeline", token=self._token) vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) repo.push_to_hub() # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
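# Illustrative sketch, not part of the original test suite: the minimal
# custom-pipeline registration flow that `CustomPipelineTest` and
# `DynamicPipelineTester` above exercise. The task name and the tiny model id
# are taken from those tests; treat them as placeholders.
def _example_register_custom_pipeline():
    from transformers import AutoModelForSequenceClassification, pipeline
    from transformers.pipelines import PIPELINE_REGISTRY

    from test_module.custom_pipeline import PairClassificationPipeline

    PIPELINE_REGISTRY.register_pipeline(
        "pair-classification",
        pipeline_class=PairClassificationPipeline,
        pt_model=AutoModelForSequenceClassification,
    )
    classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
    # Registering the task records it in `model.config.custom_pipelines`, so a
    # later `save_pretrained` / `push_to_hub` makes the pipeline loadable again
    # with `trust_remote_code=True`, as the tests above verify.
    return classifier("I hate you", second_text="I love you")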
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_to_audio.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING # for now only test text_to_waveform and not text_to_spectrogram @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) # test two examples side-by-side outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { # Using `do_sample=False` to force deterministic output "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) # test two examples side-by-side outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test other generation strategy forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test using a speaker embedding processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def 
test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } # atm, must do to stay coherent with BarkProcessor preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt # history_prompt is a torch.Tensor passed as a forward_param # if generation is successful, it means that it was passed to the right device outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): # use vits - a forward model speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") # for reproducibility set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): # assert error if generate parameter outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): # assert error if generate_kwargs with forward-only models outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def test_generative_model_kwargs(self): # use musicgen - a generative model music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # make sure generate kwargs get priority over forward params forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, 
tokenizer=tokenizer) return speech_generator, ["This is a test", "Another test"] def run_pipeline_test(self, speech_generator, _): outputs = speech_generator("This is a test") self.assertEqual(ANY(np.ndarray), outputs["audio"]) forward_params = ( {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {} ) outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
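# Illustrative sketch, not part of the original test suite: the basic
# text-to-audio call pattern these tests assert on. The model id and the
# shape of the returned dict are taken from `test_vits_model_pt` above.
def _example_text_to_audio_usage():
    from transformers import pipeline

    speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng")
    output = speech_generator("This is a test")
    # `output` holds a numpy waveform under "audio" plus its "sampling_rate"
    # (16000 for this VITS checkpoint), matching the assertions above.
    return output["audio"], output["sampling_rate"]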
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_token_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoModelForTokenClassification, AutoTokenizer, TokenClassificationPipeline, pipeline, ) from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]] # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TokenClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer) return token_classifier, ["A simple string", "A simple string that is quite a bit longer"] def run_pipeline_test(self, token_classifier, _): model = token_classifier.model tokenizer = token_classifier.tokenizer if not tokenizer.is_fast: return # Slow tokenizers do not return offsets mappings, so this test will fail outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], ) outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"]) self.assertIsInstance(outputs, list) self.assertEqual(len(outputs), 2) n = len(outputs[0]) m = len(outputs[1]) self.assertEqual( nested_simplify(outputs), [ [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(m) ], ], ) self.run_aggregation_strategy(model, tokenizer) def run_aggregation_strategy(self, model, tokenizer): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) 
self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average" ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) with self.assertWarns(UserWarning): token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) with self.assertWarns(UserWarning): token_classifier = pipeline( task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) @slow @require_torch def test_chunking(self): NER_MODEL = "elastic/distilbert-base-uncased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) tokenizer.model_max_length = 10 stride = 5 sentence = ( "Hugging Face, Inc. is a French company that develops tools for building applications using machine learning. " "The company, based in New York City was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf." 
) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="simple", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="first", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="max", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) @require_torch def test_chunking_fast(self): # Note: We cannot run the test on "conflicts" on the chunking. 
# The problem is that the model is random, and thus the results do heavily # depend on the chunking, so we cannot expect "abcd" and "bcd" to find # the same entities. We defer to slow tests for this. pipe = pipeline(model="hf-internal-testing/tiny-bert-for-token-classification") sentence = "The company, based in New York City was founded in 2016 by French entrepreneurs" results = pipe(sentence, aggregation_strategy="first") # This is what this random model gives on the full sentence self.assertEqual( nested_simplify(results), [ # This is 2 actual tokens {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) # This will force the tokenizer to split after "city was". pipe.tokenizer.model_max_length = 12 self.assertEqual( pipe.tokenizer.decode(pipe.tokenizer.encode(sentence, truncation=True)), "[CLS] the company, based in new york city was [SEP]", ) stride = 4 results = pipe(sentence, aggregation_strategy="first", stride=stride) self.assertEqual( nested_simplify(results), [ {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, # This is an extra entity found by this random model, but at least both original # entities are there {"end": 58, "entity_group": "MISC", "score": 0.115, "start": 56, "word": "by"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) @require_torch @slow def test_spanish_bert(self): # https://github.com/huggingface/transformers/pull/4987 NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses.""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1}, {"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2}, {"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4}, {"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo 
Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114}, ], ) @require_torch_accelerator @slow def test_accelerator(self): sentence = "This is dummy sentence" ner = pipeline( "token-classification", device=torch_device, aggregation_strategy=AggregationStrategy.SIMPLE, ) output = ner(sentence) self.assertEqual(nested_simplify(output), []) @require_torch @slow def test_dbmdz_english(self): # Other sentence NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Enzo works at the UN""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1}, {"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2}, {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @require_torch @slow def test_aggregation_strategy_byte_level_tokenizer(self): sentence = "Groenlinks praat over Schiphol." 
ner = pipeline("ner", model="xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"}, {"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31}, ], ) @require_torch def test_aggregation_strategy_no_b_i_prefix(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"} example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)), [ 
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy_example2(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { # Necessary for AVERAGE "scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0, 0]), "is_subword": False, "index": 1, "word": "Ra", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]), "is_subword": True, "word": "##ma", "start": 2, "end": 4, "index": 2, }, { # 4th score will have the higher average # 4th score is B-PER for this model # It's does not correspond to any of the subtokens. "scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0, 0]), "is_subword": True, "word": "##zotti", "start": 11, "end": 13, "index": 3, }, ] self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.NONE), [ {"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1}, {"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2}, {"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3}, ], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.FIRST), [{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.MAX), [{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}], ) @require_torch @slow def test_aggregation_strategy_offsets_with_leading_space(self): sentence = "We're from New York" model_name = "brandon25/deberta-base-finetuned-ner" ner = pipeline("ner", model=model_name, ignore_labels=[], aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"entity_group": "O", "score": 1.0, "word": " We're from", "start": 0, "end": 10}, {"entity_group": "LOC", "score": 1.0, "word": " New York", "start": 10, "end": 19}, ], ) @require_torch def test_gather_pre_entities(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") sentence = "Hello there" tokens = tokenizer( sentence, return_attention_mask=False, return_tensors="pt", truncation=True, 
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
        )
        offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
        special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
        input_ids = tokens["input_ids"].numpy()[0]
        # First element in [CLS]
        scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]])

        pre_entities = token_classifier.gather_pre_entities(
            sentence,
            input_ids,
            scores,
            offset_mapping,
            special_tokens_mask,
            aggregation_strategy=AggregationStrategy.NONE,
        )
        self.assertEqual(
            nested_simplify(pre_entities),
            [
                {"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1},
                {
                    "word": "there",
                    "scores": [0.8, 0.1, 0.1],
                    "index": 2,
                    "start": 6,
                    "end": 11,
                    "is_subword": False,
                },
            ],
        )

    @require_torch
    def test_word_heuristic_leading_space(self):
        model_name = "hf-internal-testing/tiny-random-deberta-v2"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")

        sentence = "I play the theremin"

        tokens = tokenizer(
            sentence,
            return_attention_mask=False,
            return_tensors="pt",
            return_special_tokens_mask=True,
            return_offsets_mapping=True,
        )
        offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
        special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
        input_ids = tokens["input_ids"].numpy()[0]
        scores = np.array([[1, 0] for _ in input_ids])  # values irrelevant for heuristic

        pre_entities = token_classifier.gather_pre_entities(
            sentence,
            input_ids,
            scores,
            offset_mapping,
            special_tokens_mask,
            aggregation_strategy=AggregationStrategy.FIRST,
        )

        # ensure expected tokenization and correct is_subword values
        self.assertEqual(
            [(entity["word"], entity["is_subword"]) for entity in pre_entities],
            [("▁I", False), ("▁play", False), ("▁the", False), ("▁there", False), ("min", True)],
        )

    @require_tf
    def test_tf_only(self):
        model_name = "hf-internal-testing/tiny-random-bert-tf-only"  # This model only has a TensorFlow version
        # We test that if we don't specify framework='tf', it gets detected automatically
        token_classifier = pipeline(task="ner", model=model_name)
        self.assertEqual(token_classifier.framework, "tf")

    @require_tf
    def test_small_model_tf(self):
        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
        token_classifier = pipeline(task="token-classification", model=model_name, framework="tf")

        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
            ],
        )

    @require_torch
    def test_no_offset_tokenizer(self):
        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
        token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None},
                {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
        token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
        outputs = token_classifier("This is a test !")
        self.assertEqual(
nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], ) token_classifier = pipeline( task="token-classification", model=model_name, framework="pt", ignore_labels=["O", "I-MISC"] ) outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [], ) token_classifier = pipeline(task="token-classification", model=model_name, framework="pt") # Overload offset_mapping outputs = token_classifier( "This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)] ) self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2}, ], ) # Batch size does not affect outputs (attention_mask are required) sentences = ["This is a test !", "Another test this is with longer sentence"] outputs = token_classifier(sentences) outputs_batched = token_classifier(sentences, batch_size=2) # Batching does not make a difference in predictions self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs)) self.assertEqual( nested_simplify(outputs_batched), [ [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], [], ], ) @require_torch def test_pt_ignore_subwords_slow_tokenizer_raises(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST) with self.assertRaises(ValueError): pipeline( task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE ) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX) @slow @require_torch def test_simple(self): token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True) sentence = "Hello Sarah Jessica Parker who Jessica lives in New York" sentence2 = "This is a simple test" output = token_classifier(sentence) output_ = nested_simplify(output) self.assertEqual( output_, [ { "entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26, }, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], ) output = token_classifier([sentence, sentence2]) output_ = nested_simplify(output) self.assertEqual( output_, [ [ {"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26}, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], [], ], ) class TokenClassificationArgumentHandlerTestCase(unittest.TestCase): def setUp(self): self.args_parser = TokenClassificationArgumentHandler() def test_simple(self): string = "This is a simple input" inputs, offset_mapping = self.args_parser(string) self.assertEqual(inputs, [string]) self.assertEqual(offset_mapping, None) inputs, offset_mapping = self.args_parser([string, string]) 
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, None)

        inputs, offset_mapping = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)])
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]])

        inputs, offset_mapping = self.args_parser(
            [string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]]
        )
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]])

    def test_errors(self):
        string = "This is a simple input"

        # 2 sentences, 1 offset_mapping, args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping, args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)])

        # 1 sentences, 2 offset_mapping
        with self.assertRaises(ValueError):
            self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])

        # 0 sentences, 1 offset_mapping
        with self.assertRaises(TypeError):
            self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import pytest
from datasets import Audio, load_dataset
from huggingface_hub import hf_hub_download, snapshot_download

from transformers import (
    MODEL_FOR_CTC_MAPPING,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    Speech2TextForConditionalGeneration,
    Wav2Vec2ForCTC,
    WhisperForConditionalGeneration,
)
from transformers.pipelines import AutomaticSpeechRecognitionPipeline, pipeline
from transformers.pipelines.audio_utils import chunk_bytes_iter
from transformers.pipelines.automatic_speech_recognition import _find_timestamp_sequence, chunk_iter
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_pyctcdecode,
    require_tf,
    require_torch,
    require_torch_accelerator,
    require_torchaudio,
    slow,
    torch_device,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


# We can't use this mixin because it assumes TF support.
# from .test_pipelines_common import CustomInputPipelineCommonMixin


@is_pipeline_test
class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else [])
        + (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None:
            # Side effect of no Fast Tokenizer class for these models, so skipping
            # But the slow tokenizer test should still run as they're quite small
            self.skipTest("No tokenizer available")
            return
            # return None, None
        speech_recognizer = AutomaticSpeechRecognitionPipeline(
            model=model, tokenizer=tokenizer, feature_extractor=processor
        )

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return speech_recognizer, [audio, audio2]

    def run_pipeline_test(self, speech_recognizer, examples):
        audio = np.zeros((34000,))
        outputs = speech_recognizer(audio)
        self.assertEqual(outputs, {"text": ANY(str)})

        # Striding
        audio = {"raw": audio, "stride": (0, 4000), "sampling_rate": speech_recognizer.feature_extractor.sampling_rate}
        if speech_recognizer.type == "ctc":
            outputs = speech_recognizer(audio)
            self.assertEqual(outputs, {"text": ANY(str)})
        elif "Whisper" in speech_recognizer.model.__class__.__name__:
            outputs = speech_recognizer(audio)
            self.assertEqual(outputs, {"text": ANY(str)})
        else:
            # Non CTC models cannot use striding.
with self.assertRaises(ValueError): outputs = speech_recognizer(audio) # Timestamps audio = np.zeros((34000,)) if speech_recognizer.type == "ctc": outputs = speech_recognizer(audio, return_timestamps="char") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) outputs = speech_recognizer(audio, return_timestamps="word") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) elif "Whisper" in speech_recognizer.model.__class__.__name__: outputs = speech_recognizer(audio, return_timestamps=True) self.assertIsInstance(outputs["chunks"], list) nb_chunks = len(outputs["chunks"]) self.assertGreater(nb_chunks, 0) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(nb_chunks)], }, ) else: # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): outputs = speech_recognizer(audio, return_timestamps="char") @require_torch @slow def test_pt_defaults(self): pipeline("automatic-speech-recognition", framework="pt") @require_torch def test_small_model_pt(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-small-mustc-en-fr-st", tokenizer="facebook/s2t-small-mustc-en-fr-st", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "(Applaudissements)"}) output = speech_recognizer(waveform, chunk_length_s=10) self.assertEqual(output, {"text": "(Applaudissements)"}) # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): _ = speech_recognizer(waveform, return_timestamps="char") @slow @require_torch_accelerator def test_whisper_fp16(self): speech_recognizer = pipeline( model="openai/whisper-base", device=torch_device, torch_dtype=torch.float16, ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) speech_recognizer(waveform) @require_torch def test_small_model_pt_seq2seq(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"}) @require_torch def test_small_model_pt_seq2seq_gen_kwargs(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform, max_new_tokens=10, generate_kwargs={"num_beams": 2}) self.assertEqual(output, {"text": "あл † γ ت ב オ 束 泣 足"}) @slow @require_torch @require_pyctcdecode def test_large_model_pt_with_lm(self): dataset = load_dataset("Narsil/asr_dummy", streaming=True) third_item = next(iter(dataset["test"].skip(3))) filename = third_item["file"] speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm", framework="pt", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") output = 
speech_recognizer(filename) self.assertEqual( output, {"text": "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumaje"}, ) # Override back to pure CTC speech_recognizer.type = "ctc" output = speech_recognizer(filename) # plumajre != plumaje self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajre" ) }, ) speech_recognizer.type = "ctc_with_lm" # Simple test with CTC with LM, chunking + timestamps output = speech_recognizer(filename, chunk_length_s=2.0, return_timestamps="word") self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajcri" ), "chunks": [ {"text": "y", "timestamp": (0.52, 0.54)}, {"text": "en", "timestamp": (0.6, 0.68)}, {"text": "las", "timestamp": (0.74, 0.84)}, {"text": "ramas", "timestamp": (0.94, 1.24)}, {"text": "medio", "timestamp": (1.32, 1.52)}, {"text": "sumergidas", "timestamp": (1.56, 2.22)}, {"text": "revoloteaban", "timestamp": (2.36, 3.0)}, {"text": "algunos", "timestamp": (3.06, 3.38)}, {"text": "pájaros", "timestamp": (3.46, 3.86)}, {"text": "de", "timestamp": (3.92, 4.0)}, {"text": "quimérico", "timestamp": (4.08, 4.6)}, {"text": "y", "timestamp": (4.66, 4.68)}, {"text": "legendario", "timestamp": (4.74, 5.26)}, {"text": "plumajcri", "timestamp": (5.34, 5.74)}, ], }, ) # CTC + LM models cannot use return_timestamps="char" with self.assertRaisesRegex( ValueError, "^CTC with LM can only predict word level timestamps, set `return_timestamps='word'`$" ): _ = speech_recognizer(filename, return_timestamps="char") @require_tf def test_small_model_tf(self): self.skipTest("Tensorflow not supported yet.") @require_torch def test_torch_small_no_tokenizer_files(self): # test that model without tokenizer file cannot be loaded with pytest.raises(OSError): pipeline( task="automatic-speech-recognition", model="patrickvonplaten/tiny-wav2vec2-no-tokenizer", framework="pt", ) @require_torch @slow def test_torch_large(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-base-960h", tokenizer="facebook/wav2vec2-base-960h", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @slow @require_torch @slow def test_return_timestamps_in_preprocess(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=8, stride_length_s=1, ) data = load_dataset("librispeech_asr", "clean", split="test", streaming=True) sample = next(iter(data)) pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language="en", task="transcribe") res = pipe(sample["audio"]["array"]) self.assertEqual(res, {"text": " Conquered returned to its place amidst the tents."}) res = pipe(sample["audio"]["array"], return_timestamps=True) self.assertEqual( res, { "text": " Conquered returned to its place amidst the tents.", "chunks": [{"timestamp": (0.0, 3.36), "text": " Conquered returned to its place amidst the tents."}], }, ) pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] res = pipe(sample["audio"]["array"], return_timestamps="word") # 
fmt: off self.assertEqual( res, { 'text': ' Conquered returned to its place amidst the tents.', 'chunks': [ {'text': ' Conquered', 'timestamp': (0.5, 1.2)}, {'text': ' returned', 'timestamp': (1.2, 1.64)}, {'text': ' to', 'timestamp': (1.64, 1.84)}, {'text': ' its', 'timestamp': (1.84, 2.02)}, {'text': ' place', 'timestamp': (2.02, 2.28)}, {'text': ' amidst', 'timestamp': (2.28, 2.8)}, {'text': ' the', 'timestamp': (2.8, 2.98)}, {'text': ' tents.', 'timestamp': (2.98, 3.48)}, ], }, ) # fmt: on @require_torch def test_return_timestamps_in_init(self): # segment-level timestamps are accepted model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-tiny") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny") dummy_speech = np.ones(100) pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps=True, ) _ = pipe(dummy_speech) # word-level timestamps are accepted pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="word", ) _ = pipe(dummy_speech) # char-level timestamps are not accepted with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. " "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="char", ) _ = pipe(dummy_speech) @require_torch @slow def test_torch_whisper(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) output = speech_recognizer([filename], chunk_length_s=5, batch_size=4) self.assertEqual(output, [{"text": " A man said to the universe, Sir, I exist."}]) @slow def test_find_longest_common_subsequence(self): max_source_positions = 1500 processor = AutoProcessor.from_pretrained("openai/whisper-tiny") previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]] self.assertEqual( processor.decode(previous_sequence[0], output_offsets=True), { "text": " not worth thinking about.", "offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}], }, ) # Merge when the previous sequence is a suffix of the next sequence # fmt: off next_sequences_1 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on self.assertEqual( processor.decode(next_sequences_1[0], output_offsets=True), { "text": ( " of spectators, retrievality is not worth thinking about. 
His instant panic was followed by a" " small, sharp blow high on his chest.<|endoftext|>" ), "offsets": [ {"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (5.0, 9.4), }, ], }, ) merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 27.5)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (27.5, 31.900000000000002), }, ], }, ) # Merge when the sequence is in the middle of the 1st next sequence # fmt: off next_sequences_2 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on # {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on" " his chest." ), "timestamp": (22.56, 31.900000000000002), }, ], }, ) # Merge when the previous sequence is not included in the current sequence next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip # {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832], ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." 
), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 29.36), }, ], }, ) # last case is when the sequence is not in the first next predicted start and end of timestamp next_sequences_3 = [ [50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934] ] # fmt: skip merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912] ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 30.96), }, ], }, ) @slow @require_torch def test_whisper_timestamp_prediction(self): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") array = np.concatenate( [ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]] ) pipe = pipeline( model="openai/whisper-small", return_timestamps=True, ) output = pipe(ds[40]["audio"]) self.assertDictEqual( output, { "text": " A man said to the universe, Sir, I exist.", "chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.26)}], }, ) output = pipe(array, chunk_length_s=10) self.assertDictEqual( nested_simplify(output), { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment he wore, the " "cut" ), "timestamp": (5.5, 11.95), }, { "text": ( " on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him " "with" ), "timestamp": (11.95, 19.61), }, { "text": " the thousands of spectators, retrievality is not worth thinking about.", "timestamp": (19.61, 25.0), }, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (25.0, 29.4), }, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore, the cut on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him with the " "thousands of spectators, retrievality is not worth thinking about. " "His instant panic was followed by a small, sharp blow high on his " "chest." 
), }, ) output = pipe(array) self.assertDictEqual( output, { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment" ), "timestamp": (5.5, 10.18), }, {"text": " he wore.", "timestamp": (10.18, 11.68)}, {"text": " The cut on his chest still dripping blood.", "timestamp": (11.68, 14.92)}, {"text": " The ache of his overstrained eyes.", "timestamp": (14.92, 17.6)}, { "text": ( " Even the soaring arena around him with the thousands of spectators were trivialities" ), "timestamp": (17.6, 22.56), }, {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore. The cut on his chest still dripping blood. The ache of his " "overstrained eyes. Even the soaring arena around him with the " "thousands of spectators were trivialities not worth thinking about." ), }, ) @slow @require_torch def test_whisper_word_timestamps_batched(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=3, return_timestamps="word", ) data = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = data[0]["audio"] # not the same output as test_simple_whisper_asr because of chunking EXPECTED_OUTPUT = { "text": " Mr. Quilder is the apostle of the middle classes and we are glad to welcome his gospel.", "chunks": [ {"text": " Mr.", "timestamp": (0.48, 0.96)}, {"text": " Quilder", "timestamp": (0.96, 1.24)}, {"text": " is", "timestamp": (1.24, 1.5)}, {"text": " the", "timestamp": (1.5, 1.72)}, {"text": " apostle", "timestamp": (1.72, 1.98)}, {"text": " of", "timestamp": (1.98, 2.32)}, {"text": " the", "timestamp": (2.32, 2.5)}, {"text": " middle", "timestamp": (2.5, 2.68)}, {"text": " classes", "timestamp": (2.68, 3.2)}, {"text": " and", "timestamp": (3.2, 3.56)}, {"text": " we", "timestamp": (3.56, 3.68)}, {"text": " are", "timestamp": (3.68, 3.8)}, {"text": " glad", "timestamp": (3.8, 4.1)}, {"text": " to", "timestamp": (4.1, 4.34)}, {"text": " welcome", "timestamp": (4.3, 4.6)}, {"text": " his", "timestamp": (4.6, 4.94)}, {"text": " gospel.", "timestamp": (4.94, 5.82)}, ], } # batch size 1: copy the audio sample since pipeline consumes it output = pipe(sample.copy(), batch_size=1) self.assertDictEqual(output, EXPECTED_OUTPUT) # batch size 2: input audio is chunked into smaller pieces so it's testing batching output = pipe(sample, batch_size=2) self.assertDictEqual(output, EXPECTED_OUTPUT) @require_torch @slow def test_torch_speech_encoder_decoder(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": 'Ein Mann sagte zum Universum : " Sir, ich existiert! 
"'}) @slow @require_torch def test_simple_wav2vec2(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @slow @require_torch @require_torchaudio def test_simple_s2t(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st") tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": "(Applausi)"}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) @slow @require_torch @require_torchaudio def test_simple_whisper_asr(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) output = speech_recognizer(filename, return_timestamps=True) self.assertEqual( output, { "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.", "chunks": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 5.44), } ], }, ) speech_recognizer.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] output = speech_recognizer(filename, return_timestamps="word") # fmt: off self.assertEqual( output, { 'text': ' Mr. 
Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.', 'chunks': [ {'text': ' Mr.', 'timestamp': (0.38, 1.04)}, {'text': ' Quilter', 'timestamp': (1.04, 1.18)}, {'text': ' is', 'timestamp': (1.18, 1.44)}, {'text': ' the', 'timestamp': (1.44, 1.58)}, {'text': ' apostle', 'timestamp': (1.58, 1.98)}, {'text': ' of', 'timestamp': (1.98, 2.32)}, {'text': ' the', 'timestamp': (2.32, 2.46)}, {'text': ' middle', 'timestamp': (2.46, 2.56)}, {'text': ' classes,', 'timestamp': (2.56, 3.4)}, {'text': ' and', 'timestamp': (3.4, 3.54)}, {'text': ' we', 'timestamp': (3.54, 3.62)}, {'text': ' are', 'timestamp': (3.62, 3.72)}, {'text': ' glad', 'timestamp': (3.72, 4.0)}, {'text': ' to', 'timestamp': (4.0, 4.26)}, {'text': ' welcome', 'timestamp': (4.26, 4.56)}, {'text': ' his', 'timestamp': (4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.84)} ] } ) # fmt: on # Whisper can only predict segment level timestamps or word level, not character level with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. " "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): _ = speech_recognizer(filename, return_timestamps="char") @slow @require_torch @require_torchaudio def test_simple_whisper_translation(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-large", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large") speech_recognizer_2 = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor ) output_2 = speech_recognizer_2(filename) self.assertEqual(output, output_2) # either use generate_kwargs or set the model's generation_config # model.generation_config.task = "transcribe" # model.generation_config.lang = "<|it|>" speech_translator = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, generate_kwargs={"task": "transcribe", "language": "<|it|>"}, ) output_3 = speech_translator(filename) self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."}) @slow @require_torch def test_whisper_language(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] # 1. English-only model compatible with no language argument output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) # 2. English-only Whisper does not accept the language argument with self.assertRaisesRegex( ValueError, "Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, " "pass `is_multilingual=True` to generate, or update the generation config.", ): _ = speech_recognizer(filename, generate_kwargs={"language": "en"}) # 3. 
Multilingual model accepts language argument speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) output = speech_recognizer(filename, generate_kwargs={"language": "en"}) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."}, ) @slow @require_torch @require_torchaudio def test_xls_r_to_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-21-to-en", feature_extractor="facebook/wav2vec2-xls-r-1b-21-to-en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A man said to the universe: “Sir, I exist."}) @slow @require_torch @require_torchaudio def test_xls_r_from_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-en-to-15", feature_extractor="facebook/wav2vec2-xls-r-1b-en-to-15", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "Ein Mann sagte zu dem Universum, Sir, ich bin da."}) @slow @require_torch @require_torchaudio def test_speech_to_text_leveraged(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-2-bart-base", feature_extractor="patrickvonplaten/wav2vec2-2-bart-base", tokenizer=AutoTokenizer.from_pretrained("patrickvonplaten/wav2vec2-2-bart-base"), framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "a man said to the universe sir i exist"}) @slow @require_torch_accelerator def test_wav2vec2_conformer_float16(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-conformer-rope-large-960h-ft", device=torch_device, torch_dtype=torch.float16, framework="pt", ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] output = speech_recognizer(sample) self.assertEqual( output, {"text": "MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"}, ) @require_torch def test_chunking_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "ZBT ZC") @require_torch def test_return_timestamps_ctc_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") # Take short audio to keep the test readable audio = ds[40]["audio"]["array"][:800] output = speech_recognizer(audio, return_timestamps="char") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": " 
", "timestamp": (0.0, 0.012)}, {"text": "Z", "timestamp": (0.012, 0.016)}, {"text": "B", "timestamp": (0.016, 0.02)}, {"text": "T", "timestamp": (0.02, 0.024)}, {"text": " ", "timestamp": (0.024, 0.028)}, {"text": "Z", "timestamp": (0.028, 0.032)}, {"text": "X", "timestamp": (0.032, 0.036)}, {"text": " ", "timestamp": (0.036, 0.04)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": "ZBT", "timestamp": (0.012, 0.024)}, {"text": "ZX", "timestamp": (0.028, 0.036)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) @require_torch @require_pyctcdecode def test_chunking_fast_with_lm(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) # Batch_size = 1 output1 = speech_recognizer([audio_tiled], batch_size=1) self.assertEqual(output1, [{"text": ANY(str)}]) self.assertEqual(output1[0]["text"][:6], "<s> <s") # batch_size = 2 output2 = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output2, [{"text": ANY(str)}]) self.assertEqual(output2[0]["text"][:6], "<s> <s") # TODO There is an offby one error because of the ratio. # Maybe logits get affected by the padding on this random # model is more likely. Add some masking ? # self.assertEqual(output1, output2) @require_torch @require_pyctcdecode def test_with_lm_fast(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") # Making sure the argument are passed to the decoder # Since no change happens in the result, check the error comes from # the `decode_beams` function. with self.assertRaises(TypeError) as e: output = speech_recognizer([audio_tiled], decoder_kwargs={"num_beams": 2}) self.assertContains(e.msg, "TypeError: decode_beams() got an unexpected keyword argument 'num_beams'") output = speech_recognizer([audio_tiled], decoder_kwargs={"beam_width": 2}) @require_torch @require_pyctcdecode def test_with_local_lm_fast(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") speech_recognizer = pipeline( task="automatic-speech-recognition", model=local_dir, ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") @require_torch @slow def test_whisper_longform(self): # fmt: off EXPECTED_RESULT = """ Folks, if you watch the show, you know, I spent a lot of time right over there. 
Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out of fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct denny's, set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!""" # fmt: on processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to("cuda") pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, device="cuda:0", ) ds = load_dataset("distil-whisper/meanwhile", "default")["test"] ds = ds.cast_column("audio", Audio(sampling_rate=16000)) audio = ds[:1]["audio"] result = pipe(audio)[0]["text"] assert result == EXPECTED_RESULT @require_torch @slow def test_seamless_v2(self): pipe = pipeline( "automatic-speech-recognition", model="facebook/seamless-m4t-v2-large", device="cuda:0", ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample, generate_kwargs={"tgt_lang": "eng"}) EXPECTED_RESULT = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" assert result["text"] == EXPECTED_RESULT @require_torch @slow def test_chunking_and_timestamps(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") speech_recognizer = pipeline( task="automatic-speech-recognition", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, framework="pt", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ("A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats).strip()}]) output = speech_recognizer(audio, return_timestamps="char") self.assertEqual(audio.shape, (74_400,)) self.assertEqual(speech_recognizer.feature_extractor.sampling_rate, 16_000) # The audio is 74_400 / 16_000 = 4.65s long. 
self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": " ", "timestamp": (0.62, 0.66)}, {"text": "M", "timestamp": (0.68, 0.7)}, {"text": "A", "timestamp": (0.78, 0.8)}, {"text": "N", "timestamp": (0.84, 0.86)}, {"text": " ", "timestamp": (0.92, 0.98)}, {"text": "S", "timestamp": (1.06, 1.08)}, {"text": "A", "timestamp": (1.14, 1.16)}, {"text": "I", "timestamp": (1.16, 1.18)}, {"text": "D", "timestamp": (1.2, 1.24)}, {"text": " ", "timestamp": (1.24, 1.28)}, {"text": "T", "timestamp": (1.28, 1.32)}, {"text": "O", "timestamp": (1.34, 1.36)}, {"text": " ", "timestamp": (1.38, 1.42)}, {"text": "T", "timestamp": (1.42, 1.44)}, {"text": "H", "timestamp": (1.44, 1.46)}, {"text": "E", "timestamp": (1.46, 1.5)}, {"text": " ", "timestamp": (1.5, 1.56)}, {"text": "U", "timestamp": (1.58, 1.62)}, {"text": "N", "timestamp": (1.64, 1.68)}, {"text": "I", "timestamp": (1.7, 1.72)}, {"text": "V", "timestamp": (1.76, 1.78)}, {"text": "E", "timestamp": (1.84, 1.86)}, {"text": "R", "timestamp": (1.86, 1.9)}, {"text": "S", "timestamp": (1.96, 1.98)}, {"text": "E", "timestamp": (1.98, 2.02)}, {"text": " ", "timestamp": (2.02, 2.06)}, {"text": "S", "timestamp": (2.82, 2.86)}, {"text": "I", "timestamp": (2.94, 2.96)}, {"text": "R", "timestamp": (2.98, 3.02)}, {"text": " ", "timestamp": (3.06, 3.12)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": " ", "timestamp": (3.58, 3.6)}, {"text": "E", "timestamp": (3.66, 3.68)}, {"text": "X", "timestamp": (3.68, 3.7)}, {"text": "I", "timestamp": (3.9, 3.92)}, {"text": "S", "timestamp": (3.94, 3.96)}, {"text": "T", "timestamp": (4.0, 4.02)}, {"text": " ", "timestamp": (4.06, 4.1)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.28, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.5)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, {"text": "SIR", "timestamp": (2.82, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word", chunk_length_s=2.0) self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.3, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.48)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, # Tiny change linked to chunking. {"text": "SIR", "timestamp": (2.84, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) # CTC models must specify return_timestamps type - cannot set `return_timestamps=True` blindly with self.assertRaisesRegex( ValueError, "^CTC can either predict character level timestamps, or word level timestamps. 
" "Set `return_timestamps='char'` or `return_timestamps='word'` as required.$", ): _ = speech_recognizer(audio, return_timestamps=True) @require_torch @slow def test_chunking_with_lm(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-base-100h-with-lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio = np.tile(audio, n_repeats) output = speech_recognizer([audio], batch_size=2) expected_text = "A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats expected = [{"text": expected_text.strip()}] self.assertEqual(output, expected) @require_torch def test_chunk_iterator(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() ratio = 1 outs = list(chunk_iter(inputs, feature_extractor, 100, 0, 0, ratio)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) # two chunks no stride outs = list(chunk_iter(inputs, feature_extractor, 50, 0, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(50, 0, 0), (50, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 50), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # two chunks incomplete last outs = list(chunk_iter(inputs, feature_extractor, 80, 0, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 0), (20, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # one chunk since first is also last, because it contains only data # in the right strided part we just mark that part as non stride # This test is specifically crafted to trigger a bug if next chunk # would be ignored by the fact that all the data would be # contained in the strided left data. 
outs = list(chunk_iter(inputs, feature_extractor, 105, 5, 5, ratio)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) @require_torch def test_chunk_iterator_stride(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] ratio = 1 outs = list(chunk_iter(inputs, feature_extractor, 100, 20, 10, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(100, 0, 10), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100), (1, 30)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 80, 20, 10, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 10), (50, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 90, 20, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(90, 0, 0), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 90), (1, 30)]) outs = list(chunk_iter(inputs, feature_extractor, 36, 6, 6, ratio)) self.assertEqual(len(outs), 4) self.assertEqual([o["stride"] for o in outs], [(36, 0, 6), (36, 6, 6), (36, 6, 6), (28, 6, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 36), (1, 36), (1, 36), (1, 28)]) inputs = torch.LongTensor([i % 2 for i in range(100)]) input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] outs = list(chunk_iter(inputs, feature_extractor, 30, 5, 5, ratio)) self.assertEqual(len(outs), 5) self.assertEqual([o["stride"] for o in outs], [(30, 0, 5), (30, 5, 5), (30, 5, 5), (30, 5, 5), (20, 5, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 30), (1, 30), (1, 30), (1, 30), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, False, False, False, True]) # (0, 25) self.assertEqual(nested_simplify(input_values[:, :30]), nested_simplify(outs[0]["input_values"])) # (25, 45) self.assertEqual(nested_simplify(input_values[:, 20:50]), nested_simplify(outs[1]["input_values"])) # (45, 65) self.assertEqual(nested_simplify(input_values[:, 40:70]), nested_simplify(outs[2]["input_values"])) # (65, 85) self.assertEqual(nested_simplify(input_values[:, 60:90]), nested_simplify(outs[3]["input_values"])) # (85, 100) self.assertEqual(nested_simplify(input_values[:, 80:100]), nested_simplify(outs[4]["input_values"])) @require_torch def test_stride(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 10) output = speech_recognizer({"raw": waveform, "stride": (0, 0), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB XB B EB BB B EB B OB X"}) # 0 effective ids Just take the middle one output = speech_recognizer({"raw": waveform, "stride": (5000, 5000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": ""}) # Only 1 arange. 
output = speech_recognizer({"raw": waveform, "stride": (0, 9000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB"}) # 2nd arange output = speech_recognizer({"raw": waveform, "stride": (1000, 8000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "XB"}) @slow @require_torch_accelerator def test_slow_unfinished_sequence(self): from transformers import GenerationConfig pipe = pipeline( "automatic-speech-recognition", model="vasista22/whisper-hindi-large-v2", device=torch_device, ) # Original model wasn't trained with timestamps and has incorrect generation config pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2") audio = hf_hub_download("Narsil/asr_dummy", filename="hindi.ogg", repo_type="dataset") out = pipe( audio, return_timestamps=True, ) self.assertEqual( out, { "chunks": [ {"text": "", "timestamp": (18.94, 0.02)}, {"text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", "timestamp": (None, None)}, ], "text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", }, ) def require_ffmpeg(test_case): """ Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. """ import subprocess try: subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip("test requires ffmpeg")(test_case) def bytes_iter(chunk_size, chunks): for i in range(chunks): yield bytes(range(i * chunk_size, (i + 1) * chunk_size)) @require_ffmpeg class AudioUtilsTest(unittest.TestCase): def test_chunk_bytes_iter_too_big(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 10, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0)}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(1, 1))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 1)}) self.assertEqual(next(iter_), {"raw": b"\x01\x02\x03", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x02\x03\x04", "stride": (1, 1)}) # This is finished, but the chunk_bytes doesn't know it yet. 
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x04\x05", "stride": (1, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride_stream(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05\x06\x07", "stride": (1, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x06\x07\x08", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 10, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0), "partial": True}) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": True} ) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": False} ) with self.assertRaises(StopIteration): next(iter_)
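The automatic-speech-recognition tests above exercise chunked inference and character/word-level timestamps for a CTC model. As a minimal sketch of the same usage outside the test harness (the silent placeholder waveform is illustrative only, not taken from the tests):

import numpy as np
from transformers import pipeline

# Same checkpoint and chunk length as test_chunking_and_timestamps.
asr = pipeline(
    task="automatic-speech-recognition",
    model="facebook/wav2vec2-base-960h",
    chunk_length_s=10.0,  # long audio is split into 10 s windows and re-stitched
)

# Placeholder input: any 16 kHz mono float32 array works; the dict form makes
# the sampling rate explicit, as in test_stride.
waveform = np.zeros(16_000 * 30, dtype=np.float32)
out = asr({"raw": waveform, "sampling_rate": 16_000}, return_timestamps="word")
print(out["text"])
for chunk in out["chunks"]:
    print(chunk["text"], chunk["timestamp"])  # (start_s, end_s) for each word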
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) # Legacy behavior outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = 
text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) # Forcing to get all results with `top_k=None` # This is NOT the legacy format outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_visual_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline(self, model, tokenizer, processor): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_tf @unittest.skip("Visual question answering not implemented in TF") def test_small_model_tf(self): pass
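The visual-question-answering tests above accept either keyword arguments or a single dict per example. A short sketch of the same interface against the full-size ViLT checkpoint used in test_large_model_pt (the local COCO fixture path is the one the tests ship with):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
question = "How many cats are there?"

# Keyword form and dict form are interchangeable; top_k caps the returned answers.
print(vqa(image=image, question=question, top_k=2))
print(vqa({"image": image, "question": question}, top_k=2))

# Batching: a list of dicts yields one list of answers per input.
print(vqa([{"image": image, "question": question}] * 2, top_k=1))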
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_translation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang) else: translator = TranslationPipeline(model=model, tokenizer=tokenizer) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class 
TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): # We don't provide a default for this pair with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") # but we do for this one translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = pipeline(task="translation", model=model, tokenizer=tokenizer) # Missing src_lang, tgt_lang with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) # src_lang, tgt_lang can be defined at pipeline call time translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
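The translation tests above show that multilingual checkpoints need `src_lang`/`tgt_lang`, either per call or fixed when the pipeline is built. A compact sketch of both variants with the same MBart-50 checkpoint used in test_multilingual_translation:

from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

model_id = "facebook/mbart-large-50-many-to-many-mmt"
model = MBartForConditionalGeneration.from_pretrained(model_id)
tokenizer = MBart50TokenizerFast.from_pretrained(model_id)

# Languages supplied at call time; omitting them raises ValueError for this model.
translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
print(translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN"))

# Or fix the language pair once for the whole pipeline.
translator_ar = pipeline(
    task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR"
)
print(translator_ar("This is a test"))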
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_conversational.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer, Conversation, ConversationalPipeline, TFAutoModelForCausalLM, pipeline, ) from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, require_tf, require_torch, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class ConversationalPipelineTests(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) model_mapping = dict( list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(MODEL_FOR_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_CAUSAL_LM_MAPPING else [] ) tf_model_mapping = dict( list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_CAUSAL_LM_MAPPING else [] ) def get_test_pipeline(self, model, tokenizer, processor): conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) return conversation_agent, [Conversation("Hi there!")] def run_pipeline_test(self, conversation_agent, _): # Simple outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Single list outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Batch conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") self.assertEqual(len(conversation_1), 1) self.assertEqual(len(conversation_2), 1) outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5) self.assertEqual(outputs, [conversation_1, conversation_2]) self.assertEqual( outputs, [ Conversation( [ {"role": "user", "content": "Going to the movies tonight - any suggestions?"}, {"role": "assistant", "content": ANY(str)}, ], ), Conversation( [ {"role": "user", "content": "What's the last book you have read?"}, {"role": "assistant", "content": ANY(str)}, ] ), ], ) # One conversation with history conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"}) outputs = conversation_agent(conversation_2, max_new_tokens=5) self.assertEqual(outputs, conversation_2) self.assertEqual( outputs, Conversation( [ {"role": "user", "content": "What's the last book 
you have read?"}, {"role": "assistant", "content": ANY(str)}, {"role": "user", "content": "Why do you recommend it?"}, {"role": "assistant", "content": ANY(str)}, ] ), ) @require_torch @slow def test_integration_torch_conversation(self): # When conversation_agent = pipeline(task="conversational", device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result[0].generated_responses[0], "The Big Lebowski") self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?") self.assertEqual(result[1].generated_responses[0], "The Last Question") # When conversation_2.add_user_input("Why do you recommend it?") result = conversation_agent(conversation_2, do_sample=False, max_length=1000) # Then self.assertEqual(result, conversation_2) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Why do you recommend it?") self.assertEqual(result.generated_responses[1], "It's a good book.") @require_torch @slow def test_integration_torch_conversation_truncated_history(self): # When conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) # When result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 1) self.assertEqual(len(result.generated_responses), 1) self.assertEqual(result.past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result.generated_responses[0], "The Big Lebowski") # When conversation_1.add_user_input("Is it an action movie?") result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Is it an action movie?") self.assertEqual(result.generated_responses[1], "It's a comedy.") @require_torch def test_small_model_pt(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_tf def test_small_model_tf(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent 
= ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_torch @slow def test_integration_torch_conversation_dialogpt_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[31373, 50256]]) conversation_2 = Conversation("how are you ?", past_user_inputs=["hello"], generated_responses=["Hi there!"]) inputs = conversation_agent.preprocess(conversation_2) self.assertEqual( inputs["input_ids"].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]] ) @unittest.skip("Model is curently gated") @require_torch @slow def test_integration_torch_conversation_llama2_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_default_system_prompt=True) conversation = Conversation( "What is so great about #1?", past_user_inputs=["I am going to Paris, what should I see?"], generated_responses=[ """\ Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris: 1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city. 2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa. 3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows. These are just a few of the many attractions that Paris has to offer. 
With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""" ], ) inputs = tokenizer._build_conversation_input_ids(conversation) EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] # fmt: skip self.assertEqual(inputs, EXPECTED_INPUTS_IDS) model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) EXPECTED_TEXT = "what topic you want to focus on and create content around it. This will help you stand out from other creators and attract a specific audience.\n\nStep 2: Set Up Your Channel\nCreate your YouTube account and customize your channel with your branding and logo. Make sure your channel name and profile picture are consistent with your niche.\n\nStep 3: Plan Your Content\nDevelop a content strategy that includes the type of content you want to create, how often you will post, and when you will post. Consider creating a content calendar to help you stay organized.\n\nStep 4: Invest in Quality Equipment\nInvest in good quality camera and microphone equipment to ensure your videos look and sound professional. 
You don't need to break the bank, but investing in good equipment will make a big difference in the quality of your videos.\n\nStep 5: Optimize Your Videos for Search\nUse keywords in your video titles, descriptions, and tags to help people find your videos when they search for topics related to your niche" conversation = Conversation( "<<SYS>>\n Only answer with emojis, and charades\n<</SYS>>\n\nHow can I build a house in 10 steps?" ) result = conversation_agent(conversation) self.assertEqual(result.generated_responses[-1], EXPECTED_TEXT) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) # test1 conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[1710, 86, 2]]) # test2 conversation_1 = Conversation( "I like lasagne.", past_user_inputs=["hello"], generated_responses=[ " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie." ], ) inputs = conversation_agent.preprocess(conversation_1) self.assertEqual( inputs["input_ids"].tolist(), [ # This should be compared with the same conversation on ParlAI `safe_interactive` demo. [ 1710, # hello 86, 228, # Double space 228, 946, 304, 398, 6881, 558, 964, 38, 452, 315, 265, 6252, 452, 322, 968, 6884, 3146, 278, 306, 265, 617, 87, 388, 75, 341, 286, 521, 21, 228, # Double space 228, 281, # I like lasagne. 398, 6881, 558, 964, 21, 2, # EOS ], ], ) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") result = conversation_agent( conversation_1, ) self.assertEqual( result.generated_responses[0], # ParlAI implementation output, we have a different one, but it's our # second best, you can check by using num_return_sequences=10 # " Hello! How are you? I'm just getting ready to go to work, how about you?", " Hello! How are you doing today? I just got back from a walk with my dog.", ) conversation_1 = Conversation("Lasagne hello") result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3) self.assertEqual( result.generated_responses[0], " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.", ) conversation_1 = Conversation( "Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne." ) result = conversation_agent( conversation_1, encoder_no_repeat_ngram_size=3, ) self.assertEqual( result.generated_responses[0], " Me too. 
I like how it can be topped with vegetables, meats, and condiments.", ) @require_torch @slow def test_integration_torch_conversation_encoder_decoder(self): # When tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device) conversation_1 = Conversation("My name is Sarah and I live in London") conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "My name is Sarah and I live in London") self.assertEqual( result[0].generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result[1].past_user_inputs[0], "Going to the movies tonight, What movie would you recommend? " ) self.assertEqual( result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?" ) # When conversation_1.add_user_input("Not yet, what about you?") conversation_2.add_user_input("What's your name?") result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 2) self.assertEqual(len(result[1].past_user_inputs), 2) self.assertEqual(len(result[0].generated_responses), 2) self.assertEqual(len(result[1].generated_responses), 2) self.assertEqual(result[0].past_user_inputs[1], "Not yet, what about you?") self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.") self.assertEqual(result[1].past_user_inputs[1], "What's your name?") self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.") @require_torch @slow def test_from_pipeline_conversation(self): model_id = "facebook/blenderbot_small-90M" # from model id conversation_agent_from_model_id = pipeline("conversational", model=model_id, tokenizer=model_id) # from model object model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id) tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id) conversation_agent_from_model = pipeline("conversational", model=model, tokenizer=tokenizer) conversation = Conversation("My name is Sarah and I live in London") conversation_copy = Conversation("My name is Sarah and I live in London") result_model_id = conversation_agent_from_model_id([conversation]) result_model = conversation_agent_from_model([conversation_copy]) # check for equality self.assertEqual( result_model_id.generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result_model_id.generated_responses[0], result_model.generated_responses[0], )
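The conversational tests above drive the pipeline through `Conversation` objects that accumulate user inputs and generated responses across turns. A minimal multi-turn sketch with the BlenderBot checkpoint from test_integration_torch_conversation_blenderbot_400M:

from transformers import Conversation, pipeline

chatbot = pipeline(task="conversational", model="facebook/blenderbot-400M-distill")

conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Follow-up turns append to the same object; the full history is replayed to the
# model on the next call.
conversation.add_user_input("Why do you recommend it?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])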
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_document_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class Image: @staticmethod def open(*args, **kwargs): pass def load_image(_): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. INVOICE_URL = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def get_test_pipeline(self, model, tokenizer, processor): dqa_pipeline = pipeline( "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor ) image = INVOICE_URL word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) question = "What is the placebo?" examples = [ { "image": load_image(image), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def run_pipeline_test(self, dqa_pipeline, examples): outputs = dqa_pipeline(examples, top_k=2) self.assertEqual( outputs, [ [ {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, ] ] * 3, ) @require_torch @require_detectron2 @require_pytesseract def test_small_model_pt(self): dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") image = INVOICE_URL question = "How many cats are there?" expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) # We can optionnally pass directly the words and bounding boxes image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented # @require_torch # def test_small_model_pt_donut(self): # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" # question = "How many cats are there?" # # outputs = dqa_pipeline(image=image, question=question, top_k=2) # self.assertEqual( # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] # ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt_chunk(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm_chunk(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) @slow @require_torch def test_large_model_pt_donut(self): dqa_pipeline = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip("Document question answering not implemented in TF") def test_small_model_tf(self): pass
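# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original test file).
# Shows how the document-question-answering pipeline exercised above can be
# called directly. The model id and invoice URL come from the tests; running
# this requires network access plus the vision/pytesseract extras. Note that
# the tests additionally pass a tokenizer created with add_prefix_space=True,
# which is omitted here for brevity.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    answers = dqa(
        image="https://templates.invoicehome.com/invoice-template-us-neat-750px.png",
        question="What is the invoice number?",
        top_k=2,
    )
    print(answers)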
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_depth_estimation.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub.utils import insecure_hashlib from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class DepthEstimationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, depth_estimator, examples): outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") outputs = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, ], outputs, ) @require_tf @unittest.skip("Depth estimation is not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): model_id = "Intel/dpt-large" depth_estimator = pipeline("depth-estimation", model=model_id) outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") outputs["depth"] = hashimage(outputs["depth"]) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662) @require_torch def test_small_model_pt(self): # This is highly irregular to have no small tests. 
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text2text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, Text2TextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer) return generator, ["Something to write", "Something else"] def run_pipeline_test(self, generator, _): outputs = generator("Something there") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there")) outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) outputs = generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): generator(4) @require_torch def test_small_model_pt(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}]) num_return_sequences = 3 outputs = generator( "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, ) target_outputs = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(outputs, target_outputs) outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ) generator.tokenizer.pad_token_id = generator.model.config.eos_token_id generator.tokenizer.pad_token = "<pad>" outputs = generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], [ {"generated_token_ids": ANY(torch.Tensor)}, 
{"generated_token_ids": ANY(torch.Tensor)}, ], ], ) @require_tf def test_small_model_tf(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}])
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_fill_mask.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class FillMaskPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ], ) @require_torch def test_small_model_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 
3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ], ) outputs = unmasker("My name is <mask> <mask>", top_k=2) self.assertEqual( nested_simplify(outputs, decimals=6), [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ], ) @require_torch_accelerator def test_fp16_casting(self): pipe = pipeline( "fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=torch_device, framework="pt", ) # convert model to fp16 pipe.model.half() response = pipe("Paris is the [MASK] of France.") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(response, list) @slow @require_torch def test_large_model_pt(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt") self.run_large_test(unmasker) @slow @require_tf def test_large_model_tf(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf") self.run_large_test(unmasker) def run_large_test(self, unmasker): outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs), [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ], ) dummy_str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100 outputs = unmasker( "My name is <mask>" + dummy_str, tokenizer_kwargs={"truncation": True}, ) simplified = nested_simplify(outputs, decimals=4) self.assertEqual( [{"sequence": x["sequence"][:100]} for x in simplified], [ {"sequence": f"My name is,{dummy_str}"[:100]}, {"sequence": f"My name is:,{dummy_str}"[:100]}, ], ) self.assertEqual( [{k: x[k] for k in x if k != "sequence"} for x in simplified], [ {"score": 0.2819, "token": 6, "token_str": ","}, {"score": 0.0954, "token": 46686, "token_str": ":,"}, ], ) @require_torch def test_model_no_pad_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) @require_tf def test_model_no_pad_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", 
framework="tf") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) examples = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def run_pipeline_test(self, fill_masker, examples): tokenizer = fill_masker.tokenizer model = fill_masker.model outputs = fill_masker( f"This is a {tokenizer.mask_token}", ) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], ) with self.assertRaises(ValueError): fill_masker([None]) # No mask_token is not supported with self.assertRaises(PipelineException): fill_masker("This is") self.run_test_top_k(model, tokenizer) self.run_test_targets(model, tokenizer) self.run_test_top_k_targets(model, tokenizer) self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer) self.fill_mask_with_multiple_masks(model, tokenizer) def run_test_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:2] # Pipeline argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": 
ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Call argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Score equivalence outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) tokens = [top_mask["token_str"] for top_mask in outputs] scores = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(tokens) == set(targets): unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens) target_scores = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(scores), nested_simplify(target_scores)) # Raises with invalid with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""]) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="") def run_test_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2) self.assertEqual( outputs2, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def run_test_top_k_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) # top_k=2, ntargets=3 targets = sorted(vocab.keys())[:3] outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets) # If we use the most probably targets, and filter differently, we should still # have the same results targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(targets2).issubset(targets): outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2) # They should yield exactly the same result self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) vocab = tokenizer.get_vocab() # String duplicates + id duplicates targets = sorted(vocab.keys())[:3] targets = [targets[0], targets[1], targets[0], targets[2], targets[1]] outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(outputs), 3) def fill_mask_with_multiple_masks(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2 ) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], )
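# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original test file).
# Shows the fill-mask pipeline with the distilroberta-base checkpoint used in
# the slow tests above, including the `targets` and `top_k` arguments that
# run_test_targets / run_test_top_k exercise.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
    print(unmasker("The largest city in France is <mask>"))
    # Restrict candidates to explicit target tokens (note the leading spaces
    # required by the RoBERTa BPE vocabulary).
    print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))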
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these model, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest("No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a raw waveform # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. 
[ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 
0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_torch def test_siglip_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="google/siglip-base-patch16-224", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ] ] * 5, )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_segmentation.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from typing import Dict import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): # Instance segmentation (maskformer, and detr) have a slot for null class # and can output nothing even with a low threshold self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) # XXX: PIL.Image implements __eq__ which bypasses ANY, so we inverse the comparison # to make it work self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = 
datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # RGBA outputs = image_segmenter(dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # LA outputs = image_segmenter(dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # L outputs = image_segmenter(dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): # We need to test batch_size with images with the same size. # Detr doesn't normalize the size of the images, meaning we can have # 800x800 or 800x1200, meaning we cannot batch simply. # We simply bail on this batch_size = 1 else: batch_size = 2 # 5 times the same image so the output shape is predictable batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" # The default task is `image-classification` we need to override pipe = pipeline(task="image-segmentation", model=model_id) # This model does NOT support neither `instance` nor `panoptic` # We should error out with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), "Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = 
ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) # This is extremely brittle, and those values are made specific for the CI. self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) # This must be surprising to the reader. # The `panoptic` returns only LABEL_215, and this returns 3 labels. # output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] # page links (to visualize) expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] # actual links to get files expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] # Convert masks to numpy array output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) # With un-trained tiny random models, the output `logits` tensor is very likely to contain many values # close to each other, which cause `argmax` to give quite different results when running the test on 2 # environments. We use a lower threshold `0.9` here to avoid flakiness. 
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: # shortening by hashing o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) # Shortening by hashing for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, 
"label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=threshold) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = 
pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=0.99) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, threshold=0.99, subtask="instance") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, subtask="semantic") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0], threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 
0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 
55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
0
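For reference, the calls exercised by `test_small_model_pt` in the zero-shot object detection tests above can be reproduced directly. This is an illustrative sketch rather than part of the test file; it reuses the tiny OwlViT checkpoint, the local COCO fixture path and the 0.64 threshold from that test.

from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection",
    model="hf-internal-testing/tiny-random-owlvit-object-detection",
)

predictions = detector(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.64,  # drop detections below this score, as in test_small_model_pt
)

for pred in predictions:
    # Each prediction is a dict with a "score", a "label" and a pixel "box".
    box = pred["box"]
    print(pred["label"], round(pred["score"], 4), box["xmin"], box["ymin"], box["xmax"], box["ymax"])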
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_mask_generation.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict import numpy as np from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"hash": hashimage(mask), "shape": shape} @is_pipeline_test @require_vision @require_torch class MaskGenerationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) tf_model_mapping = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] # TODO: Implement me @Arthur def run_pipeline_test(self, mask_generator, examples): pass @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_small_model_pt(self): image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge") outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 
0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871} ], ) # fmt: on @require_torch @slow def test_threshold(self): model_id = "facebook/sam-vit-huge" image_segmenter = pipeline("mask-generation", model=model_id) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 ) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053}, ], )
0
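The mask-generation tests above follow the same pipeline pattern; a minimal sketch of a direct call is shown below. It assumes the heavy `facebook/sam-vit-huge` checkpoint used by the slow tests is available, and it summarises each mask by its shape and score instead of hashing it as the test helper does.

import numpy as np
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=256,  # same point-prompt batching as in test_small_model_pt
)

for mask, score in zip(outputs["masks"], outputs["scores"]):
    # Each mask covers the full input image; the tests reduce it to a hash plus shape.
    print(np.array(mask).shape, float(score))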
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. # That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], ) @slow @require_torch def test_multilabel_classification(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) image_classifier.model.config.problem_type = "multi_label_classification" outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ) outputs = 
image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ], ) @slow @require_torch def test_function_to_apply(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier( "http://images.cocodataset.org/val2017/000000039769.jpg", function_to_apply="sigmoid", ) self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], )
0
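Likewise for the image-classification pipeline: a small sketch of the two call styles covered above, `top_k` selection and `function_to_apply="sigmoid"`, using the same tiny ViT checkpoint so it runs quickly. Shown here for illustration only.

from transformers import pipeline

classifier = pipeline("image-classification", model="hf-internal-testing/tiny-random-vit")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"

# Softmax scores for the two best labels, as in test_small_model_pt.
print(classifier(url, top_k=2))

# Independent per-label sigmoid scores, as in test_function_to_apply.
print(classifier(url, function_to_apply="sigmoid"))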
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/benchmark/test_benchmark_tf.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class TFBenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) 
self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "patrickvonplaten/t5-tiny-random" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.") def test_inference_no_configs_xla(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) self.assertTrue(Path(os.path.join(tmp_dir, 
"log.txt")).exists())
0
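A condensed sketch of the TensorFlow benchmark API that these tests drive; the model id and argument values are copied from the tests above, and the snippet is illustrative rather than a prescribed usage.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,  # stay in-process, as the tests do
)
results = TensorFlowBenchmark(args).run()

# Nested dicts keyed by model name, then batch size and sequence length.
print(results.time_inference_result)
print(results.memory_inference_result)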
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/benchmark/test_benchmark.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class BenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_torchscript(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_inference_fp16(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_model_no_architectures(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) # set architectures equal to `None` config.architectures = None benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_train_no_configs_fp16(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, 
"train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
0
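The PyTorch variant mirrors the TensorFlow one; the sketch below combines the inference arguments and the CSV-export options from `test_save_csv_files` above. The temporary directory is only an example location, not something the library requires.

import os
import tempfile

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

with tempfile.TemporaryDirectory() as tmp_dir:
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        save_to_csv=True,
        inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
        inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
        env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
        multi_process=False,
    )
    PyTorchBenchmark(args).run()
    print(sorted(os.listdir(tmp_dir)))  # expect env.csv, inf_mem.csv, inf_time.csv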
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/nezha/test_modeling_nezha.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class NezhaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return NezhaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = NezhaModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NezhaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NezhaForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = NezhaForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": NezhaModel, 
"fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = NezhaModelTester(self) self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = NezhaModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "bert.pt")) loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class NezhaModelIntegrationTest(unittest.TestCase): @slow def test_inference_nezha_model(self): model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_nezha_masked_lm(self): model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 6, 21128)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
0
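Finally, the integration check at the end of the Nezha tests boils down to the forward pass below; the checkpoint, dummy inputs and expected hidden-state shape are taken from `test_inference_nezha_model`, and the snippet is only a sketch of that check, not a replacement for it.

import torch
from transformers import NezhaModel

model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
model.eval()

input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

with torch.no_grad():
    # Index 0 of the output tuple is the last hidden state.
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]

print(last_hidden_state.shape)  # torch.Size([1, 6, 768]) for nezha-cn-base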
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_modeling_tf_clip.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow CLIP model. """ from __future__ import annotations import inspect import os import tempfile import unittest from importlib import import_module import requests from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCLIPModel, TFCLIPTextModel, TFCLIPVisionModel, TFSharedEmbeddings from transformers.models.clip.modeling_tf_clip import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class TFCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = TFCLIPVisionModel(config=config) result = model(pixel_values, training=False) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFCLIPVisionModel,) if is_tf_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFCLIPVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_inputs_embeds(self): # CLIP does not use inputs_embeds pass def test_graph_mode_with_inputs_embeds(self): # CLIP does not use inputs_embeds pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions 
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # CLIP has a different seq_length image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check num outputs self.assertEqual(len(outputs), num_out) # Check num layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) # Check attention outputs image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, 
seq_len], ) # Check hidden states self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [seq_len, self.model_tester.hidden_size], ) class TFCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there # is still at least one token being attended to for each batch. # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team. input_mask = tf.concat( [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1 ) config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFCLIPTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFCLIPTextModel,) if is_tf_available() else () test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_inputs_embeds(self): # CLIP does not use inputs_embeds pass @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = 
TFCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check number of outputs self.assertEqual(len(outputs), num_out) # Check number of layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) # Check hidden states self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) # Check attention outputs self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) seq_length = self.model_tester.seq_length key_length = getattr(self.model_tester, "key_length", seq_length) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, key_length], ) class TFCLIPModelTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = TFCLIPTextModelTester(parent) self.vision_model_tester = TFCLIPVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFCLIPModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_tf class TFCLIPModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFCLIPModel,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFCLIPModel} if is_tf_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def setUp(self): self.model_tester = TFCLIPModelTester(self) def test_model(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # hidden_states are tested in individual model tests def test_hidden_states_output(self): pass # input_embeds are tested in individual model tests def test_inputs_embeds(self): pass # CLIPModel does not have input/output embeddings def test_model_common_attributes(self): pass # overwrite from common since `TFCLIPModelTester` set `return_loss` to `True` and causes the preparation of # `symbolic_inputs` failed. def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # remove `return_loss` to make code work if self.__class__.__name__ == "TFCLIPModelTest": inputs_dict.pop("return_loss", None) tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and tf.keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = tf.keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = tf.keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, tf.keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) @slow def test_model_from_pretrained(self): for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation(self): pass @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation_extended(self): pass @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.") @slow def test_prepare_serving_output(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_tf class TFCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): 
model_name = "openai/clip-vit-base-patch32" model = TFCLIPModel.from_pretrained(model_name) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf" ) outputs = model(**inputs, training=False) # verify the logits self.assertEqual( outputs.logits_per_image.shape, tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = tf.constant([[24.5701, 19.3049]]) tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_tokenization_clip.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = CLIPTokenizer rust_tokenizer_class = CLIPTokenizerFast test_rust_tokenizer = True from_pretrained_kwargs = {} test_seq2seq = False def setUp(self): super().setUp() vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @require_ftfy def test_check_encoding_slow_fast(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
text_tokenized_s = tokenizer_s.tokenize(text) text_tokenized_r = tokenizer_r.tokenize(text) self.assertListEqual(text_tokenized_s, text_tokenized_r) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways text = "xa\u0303y" + " " + "x\xe3y" text_tokenized_s = tokenizer_s.tokenize(text) text_tokenized_r = tokenizer_r.tokenize(text) self.assertListEqual(text_tokenized_s, text_tokenized_r) # Test that the tokenization is identical on unicode of space type spaces_unicodes = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: text_tokenized_s = tokenizer_s.tokenize(unicode_seq) text_tokenized_r = tokenizer_r.tokenize(unicode_seq) self.assertListEqual(text_tokenized_s, text_tokenized_r) # Test that the tokenization is identical on unicode of line break type line_break_unicodes = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: text_tokenized_s = tokenizer_s.tokenize(unicode_seq) text_tokenized_r = tokenizer_r.tokenize(unicode_seq) self.assertListEqual(text_tokenized_s, text_tokenized_r) def test_offsets_mapping_with_different_add_prefix_space_argument(self): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name` text = f"{text_of_1_token} {text_of_1_token}" tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) text = f" {text}" tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) def test_log_warning(self): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. with self.assertRaises(ValueError) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer") self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." 
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    # overwrite common test
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
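# A standalone sketch of the tiny BPE setup built in setUp() above, assuming only
# that CLIPTokenizer accepts vocab/merges file paths plus an unk_token, as
# test_full_tokenizer does. It reproduces the expected split of "lower newer".
import json
import os
import tempfile

from transformers import CLIPTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>",
         "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    merges_file = os.path.join(tmpdir, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))

    tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
    print(tokenizer.tokenize("lower newer"))  # ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']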
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_image_processing_clip.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import CLIPImageProcessor class CLIPImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class CLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = CLIPImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = CLIPImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def 
test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
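# A quick sketch of the configuration described by CLIPImageProcessingTester above:
# resize so the shortest edge is 20 pixels, then center-crop to 18x18 and normalize.
# The random dummy image is an assumption for illustration; the expected output shape
# (batch, channels, height, width) follows from expected_output_image_shape().
import numpy as np
from PIL import Image

from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor(
    do_resize=True,
    size={"shortest_edge": 20},
    do_center_crop=True,
    crop_size={"height": 18, "width": 18},
    do_normalize=True,
    image_mean=[0.48145466, 0.4578275, 0.40821073],
    image_std=[0.26862954, 0.26130258, 0.27577711],
    do_convert_rgb=True,
)

image = Image.fromarray(np.random.randint(0, 256, (40, 60, 3), dtype=np.uint8))
pixel_values = image_processor(images=image, return_tensors="np")["pixel_values"]
print(pixel_values.shape)  # expected: (1, 3, 18, 18)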
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_modeling_flax_clip.py
import inspect import tempfile import unittest import numpy as np import transformers from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.clip.modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection, FlaxCLIPVisionModel, ) if is_torch_available(): import torch class FlaxCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class FlaxCLIPVisionModelTest(FlaxModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (FlaxCLIPVisionModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxCLIPVisionModelTester(self) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(pixel_values, **kwargs): return model(pixel_values=pixel_values, **kwargs).to_tuple() with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict) with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict) self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1) # CLIP has a different seq_length image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], ) # FlaxCLIPVisionModel does not have any base model def test_save_load_from_base(self): pass # FlaxCLIPVisionModel does not have any base model def test_save_load_to_base(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_from_base_pt(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_to_base_pt(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): pass @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True) outputs = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(outputs) class FlaxCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = CLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, 
input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_flax class FlaxCLIPTextModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection) if is_flax_available() else () def setUp(self): self.model_tester = FlaxCLIPTextModelTester(self) # FlaxCLIPTextModel does not have any base model def test_save_load_from_base(self): pass # FlaxCLIPVisionModel does not have any base model def test_save_load_to_base(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_from_base_pt(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_to_base_pt(self): pass # FlaxCLIPVisionModel does not have any base model @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): pass @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True) outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs) class FlaxCLIPModelTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = FlaxCLIPTextModelTester(parent) self.vision_model_tester = FlaxCLIPVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64) return config, input_ids, attention_mask, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_flax class FlaxCLIPModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxCLIPModel,) if is_flax_available() else () test_attention_outputs = False def setUp(self): self.model_tester = FlaxCLIPModelTester(self) # hidden_states are tested in individual model tests def test_hidden_states_output(self): pass def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, pixel_values, **kwargs): return model(input_ids=input_ids, pixel_values=pixel_values, **kwargs).to_tuple() with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict) with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict) self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs[:4], outputs[:4]): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = 
[*signature.parameters.keys()] expected_arg_names = ["input_ids", "pixel_values", "attention_mask", "position_ids"] self.assertListEqual(arg_names[:4], expected_arg_names) def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = FlaxCLIPModel(config) @jax.jit def model_jitted(pixel_values): return model.get_image_features(pixel_values=pixel_values) with self.subTest("JIT Enabled"): jitted_output = model_jitted(inputs_dict["pixel_values"]) with self.subTest("JIT Disabled"): with jax.disable_jit(): output = model_jitted(inputs_dict["pixel_values"]) self.assertEqual(jitted_output.shape, output.shape) self.assertTrue(np.allclose(jitted_output, output, atol=1e-3)) def test_get_text_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = FlaxCLIPModel(config) @jax.jit def model_jitted(input_ids, attention_mask, **kwargs): return model.get_text_features(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_output = model_jitted(**inputs_dict) with self.subTest("JIT Disabled"): with jax.disable_jit(): output = model_jitted(**inputs_dict) self.assertEqual(jitted_output.shape, output.shape) self.assertTrue(np.allclose(jitted_output, output, atol=1e-3)) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True) outputs = model(input_ids=np.ones((1, 1)), pixel_values=np.ones((1, 3, 224, 224))) self.assertIsNotNone(outputs) # overwrite from common since FlaxCLIPModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # overwrite from common since FlaxCLIPModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**prepared_inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) # overwrite from common since FlaxCLIPModel returns nested output # which is not supported in the common test def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ != "FlaxBertModel": continue with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4] for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4] for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3)
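# A minimal sketch of the jit pattern exercised by test_get_image_features and
# test_get_text_features above, run against the public checkpoint that the slow
# tests load (from PyTorch weights, as they do with from_pt=True).
import jax
import numpy as np

from transformers import FlaxCLIPModel

model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)


@jax.jit
def image_features(pixel_values):
    return model.get_image_features(pixel_values=pixel_values)


features = image_features(np.ones((1, 3, 224, 224), dtype=np.float32))
print(features.shape)  # (1, projection_dim) for this checkpoint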
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_modeling_clip.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch CLIP model. """ import inspect import os import tempfile import unittest import numpy as np import requests import transformers from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from transformers.testing_utils import ( is_flax_available, is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( CLIPModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) from transformers.models.clip.modeling_clip import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import CLIPProcessor if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) class CLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPVisionModel(config=config) 
model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, pixel_values): model = CLIPVisionModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.image_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (CLIPVisionModel, CLIPVisionModelWithProjection) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = CLIPVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def 
test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPVisionModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "visual_projection")) class CLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = CLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_projection(self, config, input_ids, input_mask): model = 
CLIPTextModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPTextModel, CLIPTextModelWithProjection) if is_torch_available() else () fx_compatible = True test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = CLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_model_with_projection_from_pretrained(self): for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPTextModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "text_projection")) class CLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = CLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = CLIPVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return 
CLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = CLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": CLIPModel} if is_torch_available() else {} fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = CLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass # override as the `logit_scale` parameter initilization is different for CLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, 
pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save CLIPConfig and check if we can load CLIPVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = CLIPVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save CLIPConfig and check if we can load CLIPTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = CLIPTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) # overwrite from common since FlaxCLIPModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): return fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # overwrite from common since FlaxCLIPModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load corresponding PyTorch class pt_model = model_class(config).eval() # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) @slow def test_model_from_pretrained(self): for model_name in CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = CLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class CLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "openai/clip-vit-base-patch32" model = CLIPModel.from_pretrained(model_name).to(torch_device) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[24.5701, 19.3049]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
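# ---------------------------------------------------------------------------
# Editor's note (hedged sketch, not part of the original test file): the
# integration test above only compares raw `logits_per_image` against
# hard-coded values. The helper below illustrates how those logits are
# typically turned into zero-shot classification probabilities with a softmax
# over the text prompts. The checkpoint name is the same public one used in
# the test; the helper name `_example_clip_zero_shot_probabilities` is purely
# illustrative and is never called by the test runner.
def _example_clip_zero_shot_probabilities():
    import requests
    import torch
    from PIL import Image

    from transformers import CLIPModel, CLIPProcessor

    checkpoint = "openai/clip-vit-base-patch32"
    model = CLIPModel.from_pretrained(checkpoint)
    processor = CLIPProcessor.from_pretrained(checkpoint)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model(**inputs)

    # logits_per_image has shape (num_images, num_text_prompts); a softmax over
    # the prompt dimension yields per-prompt probabilities for each image.
    return outputs.logits_per_image.softmax(dim=-1)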
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clip/test_processor_clip.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class CLIPProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer) self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor) self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor) def test_save_load_pretrained_additional_features(self): processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = CLIPProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), 
["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
0
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rwkv/test_modeling_rwkv.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.util import safe_repr from transformers import AutoTokenizer, RwkvConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RWKV_PRETRAINED_MODEL_ARCHIVE_LIST, RwkvForCausalLM, RwkvModel, ) from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_0 else: is_torch_greater_or_equal_than_2_0 = False class RwkvModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=False, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return RwkvConfig.from_pretrained("sgugger/rwkv-4-pile-7b") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) return ( config, input_ids, input_mask, None, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return RwkvConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, input_mask, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_rwkv_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): config.output_hidden_states = True model = RwkvModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1) def create_and_check_causl_lm(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = RwkvForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_state_equivalency(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = RwkvModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids) output_whole = outputs.last_hidden_state outputs = model(input_ids[:, :2]) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model(input_ids[:, 2:], state=outputs.state) output_two = outputs.last_hidden_state self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = RwkvForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) 
self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @require_torch class RwkvModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RwkvModel, RwkvForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": RwkvModel, "text-generation": RwkvForCausalLM} if is_torch_available() else {} ) # all_generative_model_classes = (RwkvForCausalLM,) if is_torch_available() else () fx_compatible = False test_missing_keys = False test_model_parallel = False test_pruning = False test_head_masking = False # Rwkv does not support head masking def setUp(self): self.model_tester = RwkvModelTester(self) self.config_tester = ConfigTester( self, config_class=RwkvConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"] ) def assertInterval(self, member, container, msg=None): r""" Simple utility function to check if a member is inside an interval. """ if isinstance(member, torch.Tensor): max_value, min_value = member.max().item(), member.min().item() elif isinstance(member, list) or isinstance(member, tuple): max_value, min_value = max(member), min(member) if not isinstance(container, list): raise TypeError("container should be a list or tuple") elif len(container) != 2: raise ValueError("container should have 2 elements") expected_min, expected_max = container is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max) if not is_inside_interval: standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) def test_config(self): self.config_tester.run_common_tests() def test_rwkv_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rwkv_model(*config_and_inputs) def test_rwkv_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causl_lm(*config_and_inputs) def test_state_equivalency(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_state_equivalency(*config_and_inputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): if "time_decay" in name: if param.requires_grad: self.assertTrue(param.data.max().item() == 3.0) self.assertTrue(param.data.min().item() == -5.0) elif "time_first" in name: if param.requires_grad: # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) elif any(x in name for x in ["time_mix_key", "time_mix_receptance"]): if param.requires_grad: self.assertInterval( param.data, [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "time_mix_value" in name: if param.requires_grad: self.assertInterval( 
param.data, [0.0, 1.3], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_attention_outputs(self): r""" Overriding the test_attention_outputs test, as the attention outputs of Rwkv are different from other models: they have shape `batch_size, seq_len, hidden_size`. """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also works using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [batch_size, seq_len, config.hidden_size], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) batch_size = inputs["input_ids"].shape[0] with torch.no_grad(): outputs = model(**inputs) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [batch_size, seq_len, config.hidden_size], ) @slow def test_model_from_pretrained(self): for model_name in RWKV_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = RwkvModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @slow class RWKVIntegrationTests(unittest.TestCase): def setUp(self): self.model_id = "RWKV/rwkv-4-169m-pile" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) def test_simple_generate(self): expected_output = "Hello my name is Jasmine and I am a newbie to the" model = RwkvForCausalLM.from_pretrained(self.model_id).to(torch_device) input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) def test_simple_generate_bf16(self): expected_output = "Hello my name is Jasmine and I am a newbie to the" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device) model = RwkvForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16).to(torch_device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output)
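# ---------------------------------------------------------------------------
# Editor's note (hedged sketch, not part of the original test file):
# `test_state_equivalency` above verifies that RWKV's recurrent state lets the
# model process a sequence in chunks. The helper below mirrors that pattern on
# the public "RWKV/rwkv-4-169m-pile" checkpoint already used by the
# integration tests; the helper name is illustrative only and it is never
# called by the test runner.
def _example_rwkv_chunked_forward():
    import torch

    from transformers import AutoTokenizer, RwkvModel

    model_id = "RWKV/rwkv-4-169m-pile"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = RwkvModel.from_pretrained(model_id).eval()

    input_ids = tokenizer("Hello my name is", return_tensors="pt").input_ids

    with torch.no_grad():
        # Full sequence in a single forward pass.
        full = model(input_ids).last_hidden_state

        # Same sequence in two chunks, carrying the recurrent state across calls.
        first = model(input_ids[:, :2])
        second = model(input_ids[:, 2:], state=first.state)
        chunked = torch.cat([first.last_hidden_state, second.last_hidden_state], dim=1)

    # Both paths should agree up to numerical tolerance.
    return torch.allclose(full, chunked, atol=1e-5)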
0