Dataset columns:
- text: string (length 7 to 328k)
- id: string (length 14 to 166)
- metadata: dict
- __index_level_0__: int64 (0 to 459)
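A minimal sketch of how one might load records with this schema and inspect a row, assuming the rows are published as a Hugging Face dataset; the dataset path and split name below are hypothetical placeholders, not taken from this dump.

from datasets import load_dataset

# Hypothetical dataset path; replace with the actual repository id.
ds = load_dataset("org/transformers-test-files", split="train")

row = ds[0]
print(row["id"])                     # record identifier, e.g. ".../test_image_processing_chinese_clip.py/0"
print(row["metadata"]["file_path"])  # original path of the source file inside the repo
print(len(row["text"]))              # the raw file contents, stored as a single string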
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from transformers import ChineseCLIPImageProcessor

if is_torch_available():
    pass


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def expected_output_image_shape(self, images):
        return 3, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    @unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
    def test_call_numpy_4_channels(self):
        pass


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_numpy(self):
        return super().test_call_numpy()

    @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_pytorch(self):
        return super().test_call_torch()

    @unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
    def test_call_numpy_4_channels(self):
        pass
transformers/tests/models/chinese_clip/test_image_processing_chinese_clip.py/0
{ "file_path": "transformers/tests/models/chinese_clip/test_image_processing_chinese_clip.py", "repo_id": "transformers", "token_count": 2634 }
386
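Each record's metadata carries the original repository path, so the raw source can be written back to disk. The sketch below assumes `row` is one record dict as loaded above; the output directory name is a hypothetical placeholder.

import os

def restore_file(row, out_dir="restored"):
    # Rebuild the original file layout under out_dir using metadata["file_path"].
    target = os.path.join(out_dir, row["metadata"]["file_path"])
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, "w", encoding="utf-8") as f:
        f.write(row["text"])
    return target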
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]  # fmt: skip
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/clipseg/test_processor_clipseg.py/0
{ "file_path": "transformers/tests/models/clipseg/test_processor_clipseg.py", "repo_id": "transformers", "token_count": 3413 }
387
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Conditional DETR model. """ import inspect import math import unittest from transformers import ConditionalDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ) if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class ConditionalDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], 
hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return ConditionalDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, backbone=None, use_pretrained_backbone=False, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = ConditionalDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ConditionalDetrModel, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False zero_init_hidden_state = True # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = 
ConditionalDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_conditional_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) def test_conditional_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Conditional DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Conditional DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="Conditional DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Conditional DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): # TODO Niels: fix me! pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 6 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "ConditionalDetrForObjectDetection": correct_outlen += 1 # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks if model_class.__name__ == "ConditionalDetrForSegmentation": correct_outlen += 2 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), 
self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.auxiliary_loss = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_outputs) self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", 
"decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "ConditionalDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class ConditionalDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return ( ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") if is_vision_available() else None ) def test_inference_no_head(self): model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 300, 256)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[0.4222, 0.7471, 0.8760], [0.6395, -0.2729, 0.7127], [-0.3090, 0.7642, 0.9529]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_object_detection_head(self): model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify logits + box predictions expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [[-10.4372, -5.7558, 
-8.6764], [-10.5410, -5.8704, -8.0590], [-10.6827, -6.3469, -8.3923]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [[0.7733, 0.6576, 0.4496], [0.5171, 0.1184, 0.9094], [0.8846, 0.5647, 0.2486]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.8330, 0.8313, 0.8039, 0.6829, 0.5355]).to(torch_device) expected_labels = [75, 17, 17, 75, 63] expected_slice_boxes = torch.tensor([38.3089, 72.1022, 177.6293, 118.4512]).to(torch_device) self.assertEqual(len(results["scores"]), 5) self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4)) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
transformers/tests/models/conditional_detr/test_modeling_conditional_detr.py/0
{ "file_path": "transformers/tests/models/conditional_detr/test_modeling_conditional_detr.py", "repo_id": "transformers", "token_count": 10930 }
388
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow EfficientFormer model. """ import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.modeling_tf_utils import keras from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class TFEfficientFormerModelTester: def __init__( self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.encoder_stride = encoder_stride self.num_attention_outputs = num_attention_outputs self.embed_dim = embed_dim self.seq_length = embed_dim + 1 self.resolution = resolution self.depths = depths self.hidden_sizes = hidden_sizes self.dim = dim self.mlp_expansion_ratio = mlp_expansion_ratio def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def 
get_config(self): return EfficientFormerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, ) def create_and_check_model(self, config, pixel_values, labels): model = TFEfficientFormerModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFEfficientFormerForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFEfficientFormerForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_tf_common.py, as EfficientFormer does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFEfficientFormerModel, "image-classification": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFEfficientFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.asseretIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def test_model(self): config_and_inputs 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet") def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFEfficientFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_compile_tf_model(self): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model model = model_class(config) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes functional_inputs = { key: keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) self.assertTrue(outputs_dict is not None) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class EfficientFormerModelIntegrationTest(unittest.TestCase): @cached_property def 
default_image_processor(self): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs, training=False) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_classification_head_with_teacher(self): model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs, training=False) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/efficientformer/test_modeling_tf_efficientformer.py/0
{ "file_path": "transformers/tests/models/efficientformer/test_modeling_tf_efficientformer.py", "repo_id": "transformers", "token_count": 7249 }
389
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import FlaubertConfig, is_sacremoses_available, is_torch_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class FlaubertModelTester(object): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if 
self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, 
model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() and is_sacremoses_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, 
tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # Flaubert has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/flaubert/test_modeling_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_modeling_flaubert.py", "repo_id": "transformers", "token_count": 8613 }
390
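Aside on the Flaubert record above (illustrative, not part of the dataset): test_torchscript_device_change boils down to a trace, save, and load-with-map_location round trip, which is easier to see on a toy module. Everything below (the TinyEncoder class, the shapes, the vocabulary size) is a hypothetical stand-in; only the torch.jit calls mirror the pattern used in the test.

import os
import tempfile

import torch
from torch import nn


class TinyEncoder(nn.Module):
    # Hypothetical stand-in for the traced model: two tensor inputs and one tensor
    # output, which is the calling convention torch.jit.trace is given in the test above.
    def __init__(self, vocab_size: int = 99, hidden_size: int = 16):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_size)

    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        hidden = self.embed(input_ids)
        # Zero out padded positions; keeps the traced graph free of control flow.
        return hidden * attention_mask.unsqueeze(-1)


input_ids = torch.randint(0, 99, (2, 7))
attention_mask = torch.ones_like(input_ids)

model = TinyEncoder().eval()
traced = torch.jit.trace(model, (input_ids, attention_mask))

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    # map_location plays the role of torch_device in the test: the artifact saved on
    # CPU is reloaded onto the target device before being called again.
    loaded = torch.jit.load(path, map_location="cpu")
    output = loaded(input_ids, attention_mask)
    assert output.shape == (2, 7, 16)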
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class TFFunnelModelTester: """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std # Used in the tests to check the size of the first attention layer self.num_attention_heads = n_head # Used in the tests to check the size of the first hidden state self.hidden_size = self.d_model # Used in the tests to check the number of output hidden states/attentions self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.truncate_seq = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.separate_cls = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelBaseModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) config.truncate_seq = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) config.separate_cls = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, ): model = TFFunnelForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_choices = self.num_choices model = TFFunnelForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = 
TFFunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @require_tf class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFFunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
transformers/tests/models/funnel/test_modeling_tf_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_modeling_tf_funnel.py", "repo_id": "transformers", "token_count": 6844 }
391
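Aside on the Funnel record above (illustrative, not part of the dataset): the comment in TFFunnelModelTester about the two extra hidden states is easy to lose in the flattened text, so the same bookkeeping is restated here as a plain helper. The helper name and the assertions are made up for illustration; the arithmetic simply mirrors the tester's defaults (block_sizes=[1, 1, 2], num_decoder_layers=1).

def funnel_layer_counts(block_sizes, num_decoder_layers, base):
    # Mirrors TFFunnelModelTester: decoder layers only count for the full model;
    # the base (encoder-only) variant stops after the last encoder block.
    num_hidden_layers = sum(block_sizes) + (0 if base else num_decoder_layers)
    # The full model reports two extra hidden states on top of that: the input
    # embeddings, and the upsampled encoder output summed with the last hidden
    # state of the first block (i.e. the decoder's first hidden state).
    expected_num_hidden_layers = None if base else num_hidden_layers + 2
    return num_hidden_layers, expected_num_hidden_layers


assert funnel_layer_counts([1, 1, 2], num_decoder_layers=1, base=False) == (5, 7)
assert funnel_layer_counts([1, 1, 2], num_decoder_layers=1, base=True) == (4, None)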
# coding=utf-8 # Copyright 2022 Google LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import is_flax_available from transformers.models.auto import get_values from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_sentencepiece, require_tokenizers, slow, ) from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_MAPPING, AutoTokenizer, LongT5Config from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.longt5.modeling_flax_longt5 import ( FlaxLongT5ForConditionalGeneration, FlaxLongT5Model, shift_tokens_right, ) class FlaxLongT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, local_radius=5, encoder_attention_type="local", global_block_size=3, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.local_radius = local_radius self.block_len = local_radius + 1 self.encoder_attention_type = encoder_attention_type self.global_block_size = global_block_size # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = 
ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) config = LongT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, local_radius=self.local_radius, encoder_attention_type=self.encoder_attention_type, global_block_size=self.global_block_size, ) return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): model = FlaxLongT5Model(config=config) result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)) def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(input_ids) # prevent fully zero'd out attention mask decoder_attention_mask = jnp.ones_like(decoder_attention_mask) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_flax class FlaxLongT5ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = 
(FlaxLongT5Model, FlaxLongT5ForConditionalGeneration) if is_flax_available() else () all_generative_model_classes = (FlaxLongT5ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True def setUp(self): self.model_tester = FlaxLongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_use_cache_forward_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, *config_and_inputs) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_shift_right(self): decoder_start_token_id = 0 pad_token_id = 1 labels = np.arange(2, 102).reshape(5, 20) labels[:2, 15:] = -100 decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id) np_decoder_input_ids = np.array(decoder_input_ids) padded_slice = np_decoder_input_ids[:2, (15 + 1) :] self.assertTrue((padded_slice == 1).all()) not_padded_slice = np_decoder_input_ids[2:, 1:] rolled_labels = np.roll(labels[2:], 1)[:, 1:] 
self.assertTrue((not_padded_slice == rolled_labels).all()) self.assertTrue((np_decoder_input_ids[:, 0] == 0).all()) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) block_len = getattr(self.model_tester, "block_len", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions 
decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len], ) # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = 
(base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") class FlaxLongT5TGlobalModelTest(FlaxLongT5ModelTest): def setUp(self): self.model_tester = FlaxLongT5ModelTester(self, encoder_attention_type="transient-global") self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) block_len = getattr(self.model_tester, "block_len", None) global_block_size = getattr(self.model_tester, "global_block_size", None) global_seq_len = encoder_seq_length // global_block_size for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions 
self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, block_len, 3 * block_len + global_seq_len], ) @require_sentencepiece @require_tokenizers @require_flax class FlaxLongT5ModelIntegrationTests(unittest.TestCase): model_path = "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" def expected_summary(self): return [ "background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in" " developing world . it provides an excellent resolution for visualization of the coronary arteries for" " catheter - based or operating interventions . although the association of this technique with major" " complications such as mortality is highly uncommon , it is frequently associated with various cardiac" " and noncardiac complications . computed tomography coronary angiography is a promising technique for the" " evaluation of cad noninvasively . it assesses disease within the coronary artery and provides" " qualitative and quantitative information about nonobstructive atherosclerotic plaque" ] @slow def test_summarization(self): model = FlaxLongT5ForConditionalGeneration.from_pretrained(self.model_path) tok = AutoTokenizer.from_pretrained(self.model_path) ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . 
the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse nt for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque ) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . 
a s ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the di agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . 
\n the diagnostic performance of ct coronary angiography for the detection of signif icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement , double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \n average radiation dose in conventional coronary angiography and msct coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermed iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc . in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . 
at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat ients planned for noncoronary cardiac surgery . \n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """ dct = tok( [ARTICLE], max_length=1024, padding="max_length", truncation=True, return_tensors="np", ) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, do_sample=False, early_stopping=True, ).sequences decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( self.expected_summary(), decoded, )
transformers/tests/models/longt5/test_modeling_flax_longt5.py/0
{ "file_path": "transformers/tests/models/longt5/test_modeling_flax_longt5.py", "repo_id": "transformers", "token_count": 17644 }
392
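Aside on the LongT5 record above (illustrative, not part of the dataset): test_shift_right pins down what shift_tokens_right must do, namely prepend the decoder start token, drop the last label, and replace ignored (-100) positions with the pad token. The function below is a standalone NumPy sketch of that behaviour, not the library implementation; the real helper in modeling_flax_longt5 operates on jnp arrays.

import numpy as np


def shift_tokens_right_np(labels: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]          # shift every label one position to the right
    shifted[:, 0] = decoder_start_token_id   # the decoder always starts from this token
    return np.where(shifted == -100, pad_token_id, shifted)  # -100 marks ignored label positions


labels = np.arange(2, 102).reshape(5, 20)
labels[:2, 15:] = -100  # mimic padded label positions, as in test_shift_right
decoder_input_ids = shift_tokens_right_np(labels, pad_token_id=1, decoder_start_token_id=0)

assert (decoder_input_ids[:, 0] == 0).all()          # first column is the decoder start token
assert (decoder_input_ids[:2, 16:] == 1).all()       # shifted -100 positions became the pad token
assert (decoder_input_ids[2:, 1:] == labels[2:, :-1]).all()  # unpadded rows are just shifted labels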
# coding=utf-8 # Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Marian model. """ import tempfile import unittest from huggingface_hub.hf_api import list_models from transformers import MarianConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoConfig, AutoModelWithLMHead, AutoTokenizer, MarianModel, MarianMTModel, TranslationPipeline, ) from transformers.models.marian.convert_marian_to_pytorch import ( ORG_NAME, convert_hf_name_to_opus_name, convert_opus_name_to_hf_name, ) from transformers.models.marian.modeling_marian import ( MarianDecoder, MarianEncoder, MarianForCausalLM, shift_tokens_right, ) def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MarianModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, decoder_start_token_id=3, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = 
attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id # forcing a certain token to be generated, sets all other tokens to -inf # if however the token to be generated is already at -inf then it can lead token # `nan` values and thus break generation self.forced_bos_token_id = None self.forced_eos_token_id = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MarianConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, forced_bos_token_id=self.forced_bos_token_id, forced_eos_token_id=self.forced_eos_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MarianModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MarianModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() 
as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MarianModel, MarianMTModel) if is_torch_available() else () all_generative_model_classes = (MarianMTModel,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": MarianMTModel, "feature-extraction": MarianModel, "summarization": MarianMTModel, "text-generation": MarianForCausalLM, "text2text-generation": MarianMTModel, "translation": MarianMTModel, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = MarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MarianMTModel(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_share_encoder_decoder_embeddings(self): config, input_dict = self.model_tester.prepare_config_and_inputs() # check if embeddings are shared by default for model_class in self.all_model_classes: model = model_class(config) self.assertIs(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIs(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if embeddings are not shared when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) 
self.assertIsNot(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if a model with shared embeddings can be saved and loaded with share_encoder_decoder_embeddings = False config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, share_encoder_decoder_embeddings=False) self.assertIsNot(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) def test_resize_decoder_token_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs() # check if resize_decoder_token_embeddings raises an error when embeddings are shared for model_class in self.all_model_classes: model = model_class(config) with self.assertRaises(ValueError): model.resize_decoder_token_embeddings(config.vocab_size + 1) # check if decoder embeddings are resized when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.get_decoder().embed_tokens.weight.shape, (config.vocab_size + 1, config.d_model)) # check if lm_head is also resized config, _ = self.model_tester.prepare_config_and_inputs() config.share_encoder_decoder_embeddings = False model = MarianMTModel(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.lm_head.weight.shape, (config.vocab_size + 1, config.d_model)) def test_tie_word_embeddings_decoder(self): pass @unittest.skip("Skipping for now, to fix @ArthurZ or @ydshieh") def test_pipeline_conversational(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice AssertionError.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} different."
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) class ModelManagementTests(unittest.TestCase): @slow @require_torch def test_model_names(self): model_list = list_models() model_ids = [x.modelId for x in model_list if x.modelId.startswith(ORG_NAME)] bad_model_ids = [mid for mid in model_ids if "+" in mid] self.assertListEqual([], bad_model_ids) self.assertGreater(len(model_ids), 500) @require_torch @require_sentencepiece @require_tokenizers class MarianIntegrationTest(unittest.TestCase): src = "en" tgt = "de" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", "Tom asked his teacher for advice.", "That's how I would do it.", "Tom really admired Mary's courage.", "Turn around and close your eyes.", ] expected_text = [ "Ich bin ein kleiner Frosch.", "Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.", "Tom bat seinen Lehrer um Rat.", "So würde ich das machen.", "Tom bewunderte Marias Mut wirklich.", "Drehen Sie sich um und schließen Sie die Augen.", ] # ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device) c = model.config self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]]) self.assertEqual(c.max_length, 512) self.assertEqual(c.decoder_start_token_id, c.pad_token_id) if torch_device == "cuda": return model.half() else: return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to( torch_device ) self.assertEqual(self.model.device, model_inputs.input_ids.device) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128, renormalize_logits=True, # Marian should always renormalize its logits.
See #25459 ) generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @require_sentencepiece @require_tokenizers class TestMarian_EN_DE_More(MarianIntegrationTest): @slow def test_forward(self): src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."] expected_ids = [38, 121, 14, 697, 38848, 0] model_inputs = self.tokenizer(src, text_target=tgt, return_tensors="pt").to(torch_device) self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist()) desired_keys = { "input_ids", "attention_mask", "labels", } self.assertSetEqual(desired_keys, set(model_inputs.keys())) model_inputs["decoder_input_ids"] = shift_tokens_right( model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id ) model_inputs["return_dict"] = True model_inputs["use_cache"] = False with torch.no_grad(): outputs = self.model(**model_inputs) max_indices = outputs.logits.argmax(-1) self.tokenizer.batch_decode(max_indices) def test_unk_support(self): t = self.tokenizer ids = t(["||"], return_tensors="pt").to(torch_device).input_ids[0].tolist() expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id] self.assertEqual(expected, ids) def test_pad_not_split(self): input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist() expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad self.assertListEqual(expected_w_pad, input_ids_w_pad) @slow def test_batch_generation_en_de(self): self._assert_generated_batch_equal_expected() def test_auto_config(self): config = AutoConfig.from_pretrained(self.model_name) self.assertIsInstance(config, MarianConfig) @require_sentencepiece @require_tokenizers class TestMarian_EN_FR(MarianIntegrationTest): src = "en" tgt = "fr" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", ] expected_text = [ "Je suis une petite grenouille.", "Maintenant, je peux oublier les 100 mots d'allemand que je connais.", ] @slow def test_batch_generation_en_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_FR_EN(MarianIntegrationTest): src = "fr" tgt = "en" src_text = [ "Donnez moi le micro.", "Tom et Mary étaient assis à une table.", # Accents ] expected_text = [ "Give me the microphone.", "Tom and Mary were sitting at a table.", ] @slow def test_batch_generation_fr_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_RU_FR(MarianIntegrationTest): src = "ru" tgt = "fr" src_text = ["Он показал мне рукопись своей новой пьесы."] expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."] @slow def test_batch_generation_ru_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_MT_EN(MarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks without adjust_logits_generation overwritten""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_zh(MarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_eng_zho(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_ROMANCE(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow @require_torch def test_pipeline(self): pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=torch_device) output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output]) @require_sentencepiece @require_tokenizers class TestMarian_FI_EN_V2(MarianIntegrationTest): src = "fi" tgt = "en" src_text = [ "minä tykkään kirjojen lukemisesta", "Pidän jalkapallon katsomisesta", ] expected_text = ["I like to read books", "I like watching football"] @classmethod def setUpClass(cls) -> None: cls.model_name = "hf-internal-testing/test-opus-tatoeba-fi-en-v2" return cls @slow def test_batch_generation_fi_en(self): self._assert_generated_batch_equal_expected() @require_torch class TestConversionUtils(unittest.TestCase): def test_renaming_multilingual(self): old_names = [ "opus-mt-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "opus-mt-cmn+cn-fi", # no group "opus-mt-en-de", # standard name "opus-mt-en-de", # standard name ] expected = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"] self.assertListEqual(expected, [convert_opus_name_to_hf_name(x) for x in old_names]) def test_undoing_renaming(self): hf_names = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"] converted_opus_names = [convert_hf_name_to_opus_name(x) for x in hf_names] expected_opus_names = [ "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "cmn+cn-fi", "en-de", # standard name "en-de", ] self.assertListEqual(expected_opus_names, converted_opus_names) class MarianStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = 
vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MarianConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MarianDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MarianDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = 
ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MarianStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MarianDecoder, MarianForCausalLM) if is_torch_available() else () all_generative_model_classes = (MarianForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MarianStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return
transformers/tests/models/marian/test_modeling_marian.py/0
{ "file_path": "transformers/tests/models/marian/test_modeling_marian.py", "repo_id": "transformers", "token_count": 15174 }
393
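The round trip exercised by MarianIntegrationTest.translate_src_text in the file above (tokenize, generate with beam search and renormalize_logits=True, then batch_decode) can be reproduced outside the test harness. The following is a minimal sketch, not part of the test file itself; it assumes the public Helsinki-NLP/opus-mt-en-de checkpoint used by the tests can be downloaded.

import torch

from transformers import AutoTokenizer, MarianMTModel

# Minimal sketch of the translation flow exercised by MarianIntegrationTest.
model_name = "Helsinki-NLP/opus-mt-en-de"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name).eval()

src_text = ["I am a small frog.", "Turn around and close your eyes."]
batch = tokenizer(src_text, padding=True, return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(
        batch.input_ids,
        attention_mask=batch.attention_mask,
        num_beams=2,
        max_length=128,
        renormalize_logits=True,  # as in the test: keep beam scores normalized after logits processors
    )

print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))

For the first sentence the expected translation is "Ich bin ein kleiner Frosch.", matching expected_text in the integration test.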
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileNetV2 model. """ import unittest from transformers import MobileNetV2Config from transformers.testing_utils import is_flaky, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetV2ImageProcessor class MobileNetV2ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV2ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.depth_divisible_by = depth_divisible_by self.min_depth = min_depth self.expand_ratio = expand_ratio self.tf_padding = tf_padding self.output_stride = output_stride self.first_layer_is_expansion = first_layer_is_expansion self.finegrained_output = finegrained_output self.hidden_act = hidden_act self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier) self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV2Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, 
min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": MobileNetV2Model, "image-classification": MobileNetV2ForImageClassification, "image-segmentation": MobileNetV2ForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileNetV2ModelTester(self) self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV2 does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileNetV2 does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 16 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileNetV2Model.from_pretrained(model_name) self.assertIsNotNone(model) @is_flaky(description="is_flaky https://github.com/huggingface/transformers/issues/29516") def test_batching_equivalence(self): super().test_batching_equivalence() # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = 
torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_semantic_segmentation(self): model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") model = model.to(torch_device) image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 65, 65)) self.assertEqual(logits.shape, expected_shape) expected_slice = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ], device=torch_device, ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py/0
{ "file_path": "transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py", "repo_id": "transformers", "token_count": 5588 }
394
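The MobileNetV2 integration test above checks a (1, 1001) logits tensor from google/mobilenet_v2_1.0_224, i.e. 1000 ImageNet classes plus a background class. A minimal classification sketch following the same path is given below; it assumes the checkpoint can be downloaded and that the COCO fixture image referenced in prepare_img exists relative to the repository root, so treat the path as a placeholder.

import torch
from PIL import Image

from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

# Minimal sketch of the classification path verified by MobileNetV2ModelIntegrationTest.
checkpoint = "google/mobilenet_v2_1.0_224"
image_processor = MobileNetV2ImageProcessor.from_pretrained(checkpoint)
model = MobileNetV2ForImageClassification.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same fixture as prepare_img()
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001)

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])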
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """
        For comparison, run:
        >>> import t5  # pip install t5==0.7.1
        >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary

        >>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
        >>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
        >>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
        >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
        """

        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
transformers/tests/models/mt5/test_modeling_flax_mt5.py/0
{ "file_path": "transformers/tests/models/mt5/test_modeling_flax_mt5.py", "repo_id": "transformers", "token_count": 950 }
395
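The score checked in test_small_integration_test is a total negative log-likelihood: optax.softmax_cross_entropy returns the per-token cross entropy, .mean() averages it over the target, and multiplying by labels.shape[-1] recovers the sequence total that the Mesh TensorFlow scorer reports. The sketch below replays that arithmetic on dummy logits, so no checkpoint download is needed; the vocabulary size and label ids are made up purely for illustration.

import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

# Dummy stand-ins: a batch of one target sequence of length 3 over a tiny vocabulary.
vocab_size = 8
labels = jnp.array([[3, 5, 1]])                        # (batch, target_length)
logits = jnp.zeros((1, labels.shape[-1], vocab_size))  # uniform logits, as if from a decoder

# Mean per-token cross entropy, exactly as computed in the test ...
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

# ... rescaled to the total negative log-likelihood of the target sequence.
mtf_score = -(labels.shape[-1] * loss.item())
print(mtf_score)  # -3 * log(8) ≈ -6.24 for uniform logits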
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class NezhaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return NezhaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = NezhaModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = NezhaForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NezhaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = NezhaForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = NezhaForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": NezhaModel, 
"fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = NezhaModelTester(self) self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = NezhaModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "bert.pt")) loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class NezhaModelIntegrationTest(unittest.TestCase): @slow def test_inference_nezha_model(self): model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_nezha_masked_lm(self): model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 6, 21128)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/nezha/test_modeling_nezha.py/0
{ "file_path": "transformers/tests/models/nezha/test_modeling_nezha.py", "repo_id": "transformers", "token_count": 9040 }
396
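NezhaModelIntegrationTest above feeds hand-written id tensors straight into the model and only checks the output shape and a logits slice. The same forward pass can be run on its own as sketched below; it assumes the sijunhe/nezha-cn-base checkpoint is reachable, and the token ids are arbitrary placeholders exactly as in the test.

import torch

from transformers import NezhaModel

# Minimal sketch of the forward pass checked by NezhaModelIntegrationTest.test_inference_nezha_model.
model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base").eval()

input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask).last_hidden_state

print(last_hidden_state.shape)  # torch.Size([1, 6, 768]) for the base configuration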
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import OpenAIGPTConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.openai.modeling_tf_openai import ( TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel, ) class TFOpenAIGPTModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_token_type_ids = True self.use_input_mask = True self.use_labels = True self.use_mc_token_ids = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): 
model = TFOpenAIGPTModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = TFOpenAIGPTLMHeadModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_openai_gpt_double_head( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args ): model = TFOpenAIGPTDoubleHeadsModel(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "mc_token_ids": mc_token_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices)) def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": sequence_labels, } model = TFOpenAIGPTForSequenceClassification(config) result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFOpenAIGPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification) if is_tf_available() else () ) all_generative_model_classes = ( (TFOpenAIGPTLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly pipeline_model_mapping = ( { "feature-extraction": TFOpenAIGPTModel, "text-classification": TFOpenAIGPTForSequenceClassification, "text-generation": TFOpenAIGPTLMHeadModel, "zero-shot": TFOpenAIGPTForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow 
tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def setUp(self): self.model_tester = TFOpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) def test_openai_gpt_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs) def test_openai_gpt_double_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs) def test_openai_gpt_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFOpenAIGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_openai_gpt(self): model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-community/openai-gpt") input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is expected_output_ids = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
transformers/tests/models/openai/test_modeling_tf_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_modeling_tf_openai.py", "repo_id": "transformers", "token_count": 5057 }
397
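The tester above never downloads weights; it drives the TF GPT-1 classes with a tiny OpenAIGPTConfig. As a rough sketch (not part of the test file, assuming only a working TensorFlow install), the same toy configuration can be exercised directly like this:

# Minimal sketch: build the tester's tiny config and run one forward pass
# through the TF GPT-1 LM head model.
import tensorflow as tf
from transformers import OpenAIGPTConfig, TFOpenAIGPTLMHeadModel

config = OpenAIGPTConfig(
    vocab_size=99,      # toy vocabulary, mirroring the tester
    n_embd=32,
    n_layer=2,
    n_head=4,
    n_positions=512,
    pad_token_id=98,    # vocab_size - 1, as in the tester
)
model = TFOpenAIGPTLMHeadModel(config)

input_ids = tf.constant([[1, 2, 3, 4, 5, 6, 7]])  # (batch_size=1, seq_length=7)
outputs = model(input_ids)
print(outputs.logits.shape)  # (1, 7, 99): one logit per position over the toy vocab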
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch PatchTST model. """ import inspect import random import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import ( MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, PatchTSTConfig, PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForRegression, PatchTSTModel, ) @require_torch class PatchTSTModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, patch_length=5, patch_stride=5, num_input_channels=1, num_time_features=1, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, distil=False, seed=42, num_targets=2, mask_type="random", random_mask_ratio=0, ): self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride self.num_input_channels = num_input_channels self.num_time_features = num_time_features self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio self.seed = seed self.num_targets = num_targets self.distil = distil self.num_patches = (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 # define seq_length so that it can pass the test_attention_outputs self.seq_length = self.num_patches def get_config(self): return PatchTSTConfig( prediction_length=self.prediction_length, patch_length=self.patch_length, patch_stride=self.patch_stride, num_input_channels=self.num_input_channels, d_model=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, context_length=self.context_length, activation_function=self.hidden_act, seed=self.seed, num_targets=self.num_targets, mask_type=self.mask_type, random_mask_ratio=self.random_mask_ratio, ) def prepare_patchtst_inputs_dict(self, config): _past_length = 
config.context_length # bs, num_input_channels, num_patch, patch_len # [bs x seq_len x num_input_channels] past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels]) future_values = floats_tensor([self.batch_size, config.prediction_length, self.num_input_channels]) inputs_dict = { "past_values": past_values, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_patchtst_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class PatchTSTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( PatchTSTModel, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForClassification, PatchTSTForRegression, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": PatchTSTModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = True test_torchscript = False test_inputs_embeds = False test_model_common_attributes = False test_resize_embeddings = True test_resize_position_embeddings = False test_mismatched_shapes = True test_model_parallel = False has_attentions = True def setUp(self): self.model_tester = PatchTSTModelTester(self) self.config_tester = ConfigTester( self, config_class=PatchTSTConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) # if PatchTSTForPretraining if model_class == PatchTSTForPretraining: inputs_dict.pop("future_values") # else if classification model: elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING): rng = random.Random(self.model_tester.seed) labels = ids_tensor([self.model_tester.batch_size], self.model_tester.num_targets, rng=rng) inputs_dict["target_values"] = labels inputs_dict.pop("future_values") elif model_class in get_values(MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING): rng = random.Random(self.model_tester.seed) target_values = floats_tensor([self.model_tester.batch_size, self.model_tester.num_targets], rng=rng) inputs_dict["target_values"] = target_values inputs_dict.pop("future_values") return inputs_dict def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers ) self.assertEqual(len(hidden_states), expected_num_layers) num_patch = self.model_tester.num_patches self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patch, self.model_tester.hidden_size], ) config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="we have no tokens embeddings") def test_resize_tokens_embeddings(self): pass def test_model_main_input_name(self): model_signature = inspect.signature(getattr(PatchTSTModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(PatchTSTModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model_class == PatchTSTForPretraining: expected_arg_names = [ "past_values", "past_observed_mask", ] elif model_class in get_values(MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING) or model_class in get_values( MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING ): expected_arg_names = ["past_values", "target_values", "past_observed_mask"] else: expected_arg_names = [ "past_values", "past_observed_mask", "future_values", ] expected_arg_names.extend( [ "output_hidden_states", "output_attentions", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() def prepare_batch(repo_id="hf-internal-testing/etth1-hourly-batch", file="train-batch.pt"): file = hf_hub_download(repo_id=repo_id, filename=file, repo_type="dataset") batch = torch.load(file, map_location=torch_device) return batch # Note: Pretrained model is not yet downloadable. @require_torch @slow class PatchTSTModelIntegrationTests(unittest.TestCase): # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. def test_pretrain_head(self): model = PatchTSTForPretraining.from_pretrained("namctin/patchtst_etth1_pretrain").to(torch_device) batch = prepare_batch() torch.manual_seed(0) with torch.no_grad(): output = model(past_values=batch["past_values"].to(torch_device)).prediction_output num_patch = ( max(model.config.context_length, model.config.patch_length) - model.config.patch_length ) // model.config.patch_stride + 1 expected_shape = torch.Size([64, model.config.num_input_channels, num_patch, model.config.patch_length]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0173]], [[-1.0379]], [[-0.1030]], [[0.3642]], [[0.1601]], [[-1.3136]], [[0.8780]]], device=torch_device, ) self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE)) # Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable. 
def test_prediction_head(self): model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast").to(torch_device) batch = prepare_batch(file="test-batch.pt") torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"].to(torch_device), future_values=batch["future_values"].to(torch_device), ).prediction_outputs expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels]) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.5142, 0.6928, 0.6118, 0.5724, -0.3735, -0.1336, -0.7124]], device=torch_device, ) self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE)) def test_prediction_generation(self): model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast").to(torch_device) batch = prepare_batch(file="test-batch.pt") torch.manual_seed(0) with torch.no_grad(): outputs = model.generate(past_values=batch["past_values"].to(torch_device)) expected_shape = torch.Size((64, 1, model.config.prediction_length, model.config.num_input_channels)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor( [[0.4075, 0.3716, 0.4786, 0.2842, -0.3107, -0.0569, -0.7489]], device=torch_device, ) mean_prediction = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[0, -1:], expected_slice, atol=TOLERANCE)) def test_regression_generation(self): model = PatchTSTForRegression.from_pretrained("ibm/patchtst-etth1-regression-distribution").to(torch_device) batch = prepare_batch(repo_id="ibm/patchtst-etth1-test-data", file="regression_distribution_batch.pt") torch.manual_seed(0) model.eval() with torch.no_grad(): outputs = model.generate(past_values=batch["past_values"].to(torch_device)) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.num_targets)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor( [[-0.08046409], [-0.06570087], [-0.28218266], [-0.20636195], [-0.11787311]], device=torch_device, ) mean_prediction = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[-5:], expected_slice, rtol=TOLERANCE))
transformers/tests/models/patchtst/test_modeling_patchtst.py/0
{ "file_path": "transformers/tests/models/patchtst/test_modeling_patchtst.py", "repo_id": "transformers", "token_count": 6808 }
398
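The shapes asserted above follow from the patching arithmetic in the tester (num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1). As a hedged sketch (not part of the test file; the loss="mse" point-forecast head and the prediction_outputs field are taken from the tests above, not verified against other defaults), a randomly initialized PatchTSTForPrediction can be run with the same toy sizes:

# Minimal sketch: toy PatchTSTForPrediction forward pass with the tester's sizes.
import torch
from transformers import PatchTSTConfig, PatchTSTForPrediction

config = PatchTSTConfig(
    context_length=14,
    prediction_length=7,
    patch_length=5,
    patch_stride=5,        # -> num_patches = (14 - 5) // 5 + 1 = 2
    num_input_channels=1,
    d_model=16,
    num_hidden_layers=2,
    num_attention_heads=4,
    ffn_dim=4,
    loss="mse",            # point-forecast head (assumed default)
)
model = PatchTSTForPrediction(config)
model.eval()

past_values = torch.randn(2, config.context_length, config.num_input_channels)
future_values = torch.randn(2, config.prediction_length, config.num_input_channels)
with torch.no_grad():
    out = model(past_values=past_values, future_values=future_values)
print(out.prediction_outputs.shape)  # expected (2, 7, 1): (batch, prediction_length, channels)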
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "vinai/phobert-base"
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
transformers/tests/models/phobert/test_tokenization_phobert.py/0
{ "file_path": "transformers/tests/models/phobert/test_tokenization_phobert.py", "repo_id": "transformers", "token_count": 1190 }
399
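The PhoBERT test above builds its tokenizer from a six-entry toy vocab and a single merge rule; everything outside that vocabulary maps to <unk>. As a hedged sketch (not part of the test file, and only re-stating what setUp and test_full_tokenizer already do), the same toy files can be written and used outside the test harness:

# Minimal sketch: write the test's toy vocab/merges and tokenize with the slow PhoBERT BPE tokenizer.
import os
import tempfile

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

tmpdir = tempfile.mkdtemp()
vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
merges = ["#version: 0.2", "l à</w>"]

vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w", encoding="utf-8") as fp:
    for i, token in enumerate(vocab):
        fp.write(f"{token} {i}\n")   # PhoBERT vocab format: "<token> <count>" per line
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tokenizer = PhobertTokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tokenizer.tokenize("Tôi là VinAI Research"))
# -> ['T@@', 'ô@@', 'i', 'l@@', 'à', ...]  (the bpe_tokens asserted in test_full_tokenizer above)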
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def 
tearDown(self): shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_canonical_hf_index_retriever(self): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) return retriever def get_dummy_custom_hf_index_retriever(self, from_disk: bool): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) return retriever def test_canonical_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_canonical_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) hidden_states = np.array( [np.ones(self.retrieval_vector_size), 
-np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) def test_custom_hf_index_retriever_retrieve_from_disk(self): n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self): retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(tmp_dirname) retriever = RagRetriever.from_pretrained(tmp_dirname) self.assertIsInstance(retriever, RagRetriever) hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever.retrieve(hidden_states, n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def test_hf_index_retriever_call(self): import torch n_docs = 1 retriever = self.get_dummy_canonical_hf_index_retriever() question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, list) self.assertIsInstance(context_attention_mask, list) self.assertIsInstance(retrieved_doc_embeds, np.ndarray) out = retriever( question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, 
context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(context_input_ids, torch.Tensor) self.assertIsInstance(context_attention_mask, torch.Tensor) self.assertIsInstance(retrieved_doc_embeds, torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def test_custom_hf_index_end2end_retriever_call(self): context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer() n_docs = 1 retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False) retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer) question_input_ids = [[5, 7], [10, 11]] hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs) self.assertEqual( len(out), 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True ) # check for doc token related keys in dictionary.
transformers/tests/models/rag/test_retrieval_rag.py/0
{ "file_path": "transformers/tests/models/rag/test_retrieval_rag.py", "repo_id": "transformers", "token_count": 6761 }
400
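The dummy retriever above boils down to a maximum-inner-product search over a two-row datasets table with a FAISS "Flat" index: an all-ones query scores highest against the "bar" row, which is why the tests expect doc_ids [[1], [0]]. A hedged sketch of just that lookup, using datasets and faiss directly (not part of the test file):

# Minimal sketch: the inner-product lookup the dummy RAG index is built on.
import faiss
import numpy as np
from datasets import Dataset

vector_size = 8
dataset = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "title": ["Foo", "Bar"],
        "embeddings": [np.ones(vector_size), 2 * np.ones(vector_size)],
    }
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

query = np.ones(vector_size, dtype=np.float32)        # largest inner product with the "bar" row
scores, docs = dataset.get_nearest_examples("embeddings", query, k=1)
print(docs["id"])                                     # ['1'], matching the doc_ids asserted above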
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the RemBert tokenizer. """ import tempfile import unittest from tests.test_tokenization_common import AddedToken, TokenizerTesterMixin from transformers import RemBertTokenizer, RemBertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class RemBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/rembert" tokenizer_class = RemBertTokenizer rust_tokenizer_class = RemBertTokenizerFast space_between_special_tokens = True test_rust_tokenizer = True test_sentencepiece_ignore_case = True pre_trained_model_path = "google/rembert" def setUp(self): super().setUp() tokenizer = RemBertTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) # Copied from ReformerTokenizationTest.get_input_output_texts def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[5], "▁the") self.assertEqual(vocab_keys[2], "</s>") def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_full_tokenizer(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [SPIECE_UNDERLINE + "I",SPIECE_UNDERLINE + "was",SPIECE_UNDERLINE + "b","or","n",SPIECE_UNDERLINE + "in",SPIECE_UNDERLINE + "","9","2","0","0","0",",",SPIECE_UNDERLINE + "and",SPIECE_UNDERLINE + "this",SPIECE_UNDERLINE + "is",SPIECE_UNDERLINE + "f","al","s","é",".",],) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) def test_encode_decode_round_trip(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB, keep_accents=True) text = "清水寺は京都にある。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["▁", "清水寺は京都にある。"]) encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 7, 0, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEquals(decode_text, text) text = "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 
🌈" tokens = tokenizer.tokenize(text) self.assertListEqual( tokens, ['▁That', "'", 's', '▁a', 'w', 'es', 'ome', '!', '▁', '🤩', '▁', '#', 'H', 'u', 'g', 'g', 'ing', 'F', 'a', 'ce', ',', '▁', '🌟', '▁H', 'a', 've', '▁a', '▁great', '▁day', '!', '▁', '🌈']) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEquals(decode_text, "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 🌈") text = "In the sky up above" tokens = tokenizer._tokenize(text) self.assertListEqual(tokens, ["▁In", "▁the", "▁s", "k", "y", "▁up", "▁a", "b", "o", "ve"]) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 388, 5, 47, 45, 30, 118, 10, 65, 20, 123, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "The cat. . Sat <s>.In a room" tokens = tokenizer.tokenize(text) self.assertListEqual( tokens, ["▁The", "▁c", "at", ".", "▁", ".", "▁S", "at", "▁", "<", "s", ">", ".", "I", "n", "▁a", "▁room"] ) encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 68, 69, 76, 4, 7, 4, 166, 76, 7, 0, 6, 0, 4, 100, 24, 10, 136, 1001] ) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Invoice #12345, dated 2023-12-01, is due on 2024-01-15." tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁In', 'v', 'o', 'ic', 'e', '▁', '#', '1', '2', '34', '5', ',', '▁da', 'ted', '▁', '2', '0', '2', '3', '-', '1', '2', '-', '0', '1', ',', '▁is', '▁d', 'u', 'e', '▁on', '▁', '2', '0', '2', '4', '-', '0', '1', '-', '1', '5', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 388, 83, 20, 113, 15, 7, 0, 356, 602, 0, 555, 3, 417, 273, 7, 602, 347, 602, 0, 33, 356, 602, 33, 347, 356, 3, 46, 229, 51, 15, 59, 7, 602, 347, 602, 0, 33, 347, 356, 33, 356, 555, 4, 1001]) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit..." tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁', 'L', 'or', 'em', '▁', 'i', 'p', 's', 'um', '▁do', 'l', 'or', '▁sit', '▁am', 'e', 't', ',', '▁con', 'se', 'c', 'te', 't', 'ur', '▁a', 'd', 'i', 'p', 'is', 'c', 'ing', '▁', 'el', 'it', '.', '.', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 7, 279, 55, 300, 7, 23, 29, 6, 155, 92, 27, 55, 615, 219, 15, 14, 3, 247, 114, 28, 181, 14, 108, 10, 16, 23, 29, 125, 28, 17, 7, 168, 137, 4, 4, 4, 1001] ) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) # for multiple language in one sentence text = "Bonjour! Hello! こんにちは!" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["▁B", "on", "j", "o", "ur", "!", "▁He", "ll", "o", "!", "▁", "こんにちは", "!"]) encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 295, 109, 999, 20, 108, 146, 156, 86, 20, 146, 7, 0, 146, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Extra spaces\tand\nline breaks\r\nshould be handled." 
tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁E', 'x', 't', 'r', 'a', '▁sp', 'a', 'ce', 's', '▁and', '▁line', '▁b', 're', 'a', 'k', 's', '▁should', '▁be', '▁hand', 'led', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 454, 297, 14, 35, 18, 277, 18, 133, 6, 12, 485, 84, 56, 18, 45, 6, 173, 36, 363, 338, 4, 1001], ) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual("Extra spaces and line breaks should be handled.", decode_text) def test_sequence_builders(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB) text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ] def test_added_tokens_serialization(self): # Utility to test the added vocab def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir): tokenizer = tokenizer_class.from_pretrained(temp_dir) self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens) self.assertIn(new_eos, tokenizer.added_tokens_decoder.values()) self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos) self.assertDictEqual(expected, tokenizer.added_tokens_decoder) return tokenizer new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True) new_masked_token = AddedToken("[MASK]", lstrip=True, rstrip=False, normalized=False) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # Load a slow tokenizer from the hub, init with the new token for fast to also include it tokenizer = self.tokenizer_class.from_pretrained( pretrained_name, eos_token=new_eos, mask_token=new_masked_token ) EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"): self.assertEqual(tokenizer._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values())) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) with self.subTest( "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2 ) if self.rust_tokenizer_class is not None: with self.subTest( "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class" ): tokenizer_fast = _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2 ) with tempfile.TemporaryDirectory() as tmp_dir_3: tokenizer_fast.save_pretrained(tmp_dir_3) with self.subTest( "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest( "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub)"): if 
self.rust_tokenizer_class is not None: tokenizer_fast = self.rust_tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos) self.assertEqual(tokenizer_fast._eos_token, new_eos) self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values())) # We can't test the following because for BC we kept the default rstrip lstrip in slow not fast. Will comment once normalization is alright with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"): self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder) EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder with tempfile.TemporaryDirectory() as tmp_dir_4: tokenizer_fast.save_pretrained(tmp_dir_4) with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4 ) with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4 )
transformers/tests/models/rembert/test_tokenization_rembert.py/0
{ "file_path": "transformers/tests/models/rembert/test_tokenization_rembert.py", "repo_id": "transformers", "token_count": 6759 }
401
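The RemBERT assertions above all run against the repo's 1,000-piece sentencepiece fixture rather than the real google/rembert vocabulary. As a hedged sketch (not part of the test file; it assumes a transformers checkout so the fixture path resolves), the round trip those tests exercise looks like this:

# Minimal sketch: tokenize/encode round trip with the toy sentencepiece fixture.
from transformers import RemBertTokenizer
from transformers.testing_utils import get_tests_dir

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")  # 1k-piece toy model

tokenizer = RemBertTokenizer(SAMPLE_VOCAB, keep_accents=True)

tokens = tokenizer.tokenize("This is a test")
print(tokens)                                      # ['▁This', '▁is', '▁a', '▁t', 'est']
print(tokenizer.convert_tokens_to_ids(tokens))     # [285, 46, 10, 170, 382]

# convert_tokens_to_string strips the '▁' word markers back out.
print(tokenizer.convert_tokens_to_string(tokens))  # "This is a test"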
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "weiweishi/roc-bert-base-zh" tokenizer_class = RoCBertTokenizer rust_tokenizer_class = None test_rust_tokenizer = False space_between_special_tokens = True from_pretrained_filter = filter_non_english def setUp(self): super().setUp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] word_shape = {} word_pronunciation = {} for i, value in enumerate(vocab_tokens): word_shape[value] = i word_pronunciation[value] = i self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"]) self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer: json.dump(word_shape, word_shape_writer, ensure_ascii=False) with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer: json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False) def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file) tokens = tokenizer.tokenize("你好[SEP]你是谁") self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_chinese with BasicTokenizer->RoCBertBasicTokenizer def test_chinese(self): tokenizer = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_lower with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_lower(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
"), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_lower_strip_accents_false with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_lower_strip_accents_true with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_lower_strip_accents_default with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_no_lower with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_no_lower(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_no_lower_strip_accents_false with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_no_lower_strip_accents_true with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_basic_tokenizer_respects_never_split_tokens with BasicTokenizer->RoCBertBasicTokenizer def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_wordpiece_tokenizer with WordpieceTokenizer->RoCBertWordpieceTokenizer def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_is_whitespace def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_is_control def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_is_punctuation def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def test_clean_text(self): tokenizer = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] ) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_offsets_with_special_characters def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
tokens = tokenizer_r.encode_plus( sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False expected_results = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"]) # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_change_tokenize_chinese_chars def test_change_tokenize_chinese_chars(self): list_of_commun_chinese_char = ["的", "人", "有"] text_with_chinese_char = "".join(list_of_commun_chinese_char) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): kwargs["tokenize_chinese_chars"] = True tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char) self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char) kwargs["tokenize_chinese_chars"] = False tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False) ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False) tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r) tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p) # it is expected that only the first Chinese character is not preceded by "##". 
expected_tokens = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char) ] self.assertListEqual(tokens_without_spe_char_p, expected_tokens) self.assertListEqual(tokens_without_spe_char_r, expected_tokens) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file) text = tokenizer.encode("你好", add_special_tokens=False) text_2 = tokenizer.encode("你是谁", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_2 + [2] def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): string_sequence = "你好,你是谁" tokens = tokenizer.tokenize(string_sequence) tokens_ids = tokenizer.convert_tokens_to_ids(tokens) tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens) tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens) prepared_input_dict = tokenizer.prepare_for_model( tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True ) input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict)
transformers/tests/models/roc_bert/test_tokenization_roc_bert.py/0
{ "file_path": "transformers/tests/models/roc_bert/test_tokenization_roc_bert.py", "repo_id": "transformers", "token_count": 7488 }
402
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, PreTrainedTokenizerFast, SeamlessM4TTokenizer, SeamlessM4TTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 256047 RO_CODE = 256145 SMALL_TRAINING_CORPUS = [ ["This is the first sentence.", "This is the second one."], ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."], ] @require_sentencepiece @require_tokenizers class SeamlessM4TTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/hf-seamless-m4t-medium" tokenizer_class = SeamlessM4TTokenizer rust_tokenizer_class = SeamlessM4TTokenizerFast test_rust_tokenizer = True test_sentencepiece = True from_pretrained_kwargs = {} def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @unittest.skip("This fails currently and is a blocker. 
No idea why TODO @ylacombe") def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer.encode(seq_0, add_special_tokens=False) total_length = len(sequence) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Overflowing tokens stride = 2 # modify padding because it's activated by default in seamlessM4T information = tokenizer( seq_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, padding=False, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) 
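    # Worked example of the truncation/stride arithmetic asserted in the branches above
    # (the numbers are illustrative, not taken from the SentencePiece fixture): with a
    # 10-token sequence, max_length = 10 - 2 = 8 and stride = 2,
    #
    #     sequence  = list(range(10))
    #     truncated = sequence[:8]           # [0, 1, 2, 3, 4, 5, 6, 7]
    #     overflow  = sequence[-(2 + 2):]    # [6, 7, 8, 9] -> 2 cut tokens + 2 tokens of overlap
    #
    # Fast tokenizers return both chunks inside "input_ids"; slow tokenizers return the
    # truncated ids in "input_ids" and the overlap chunk under "overflowing_tokens".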
@unittest.skip("By defaults, uses pad_to_multiple_of which breaks the test") def test_maximum_encoding_length_pair_input(self): pass def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest("No padding token.") else: empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8) for key, value in empty_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # default to padding=True so need to precise which padding is called normal_tokens = tokenizer("This", pad_to_multiple_of=8, padding=False) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, "This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: return tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng", tgt_lang="ron", pad_to_multiple_of=None, ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # TODO: not working for tgt_text # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=4, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch.input_ids.shape[1], 4) self.assertEqual(batch.labels.shape[1], 4) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=4, max_target_length=10, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 4) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 4) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.") def 
test_save_slow_from_fast_and_reload_fast(self): pass # Copied from tests.models.nllb.test_tokenization_nllb.NllbTokenizationTest.test_special_tokens_initialization def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, # , from_slow=True <- unfortunately too slow to convert ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @unittest.skip( "encode_plus and batch_encode_plus are deprecated and __call__ do some processing, so we expect different results." ) def test_call(self): pass def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: return tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. # make sure it has the same prefix tokens first new_tokenizer.tgt_lang = tokenizer.tgt_lang tokenizer.tgt_lang = tokenizer.tgt_lang self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) @unittest.skip("Fails because of the hack of adding <unk> in _tokenize") def test_pickle_subword_regularization_tokenizer(self): pass @unittest.skip("Fails because of the hack of adding <unk> in _tokenize") def test_subword_regularization_tokenizer(self): pass @require_torch @require_sentencepiece @require_tokenizers class SeamlessM4TDistilledIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/hf-seamless-m4t-medium" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 3] # fmt: skip @classmethod def setUpClass(cls): cls.tokenizer: SeamlessM4TTokenizer = SeamlessM4TTokenizer.from_pretrained( cls.checkpoint_name, src_lang="eng", tgt_lang="ron" ) # cls.pad_token_id = 1 return cls def test_language_codes(self): self.assertEqual(self.tokenizer.convert_tokens_to_ids("__ace_Latn__"), 256002) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__shn__"), 256152) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__eng__"), 256047) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__fra__"), 256057) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__quy__"), 256144) def test_tokenizer_tgt_lang(self): ids = self.tokenizer(self.src_text, src_lang="fra").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256057, ids[0]) rest_ids = ids[len(self.expected_src_tokens) :] self.assertListEqual([0] * len(rest_ids), rest_ids) ids = self.tokenizer(self.src_text, src_lang="__shn__").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256152, ids[0]) # Copied from tests.models.nllb.test_tokenization_nllb.NllbDistilledIntegrationTest.test_enro_tokenizer_decode_ignores_language_codes def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna 
be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-1], 3) self.assertEqual(ids[0], EN_CODE) self.assertEqual(len(ids), desired_max_length) # Copied from tests.models.nllb.test_tokenization_nllb.NllbDistilledIntegrationTest.test_special_tokens_unaffacted_by_save_load with fairseq_tokens_to_ids->additional_special_tokens, Nllb->SeamlessM4T, Dict->List def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.additional_special_tokens self.tokenizer.save_pretrained(tmpdirname) new_tok = SeamlessM4TTokenizer.from_pretrained(tmpdirname) self.assertListEqual(new_tok.additional_special_tokens, original_special_tokens) @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), pad_to_multiple_of=None, return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.convert_tokens_to_ids("__ron__") ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 15), batch.input_ids.shape) self.assertEqual((2, 15), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE]) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) def test_seq2seq_max_length(self): batch = self.tokenizer( self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt", pad_to_multiple_of=None ) targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right( labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.convert_tokens_to_ids(self.tokenizer.tgt_lang), ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="eng", tgt_lang="fra" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 3]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, }, ) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False) tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]}) cls.tokenizer = tokenizer return cls def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(". 
Hello") # [bos, lang_id, _] + offset_sp_encode self.assertEqual(input_ids[:-1], [3, 1, 8] + [i + self.tokenizer.fairseq_offset for i in sp_encode]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten. Since the sample vocab does not have # `______`. sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute is set to False input_ids = self.tokenizer.encode(" . Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(" . Hello") self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], [7] + sp_encode) tokens = self.tokenizer.tokenize(" . Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 3]) tokens = self.tokenizer.tokenize("▁He is not") sp_encode = [ self.tokenizer.sp_model.piece_to_id("▁He"), self.tokenizer.sp_model.piece_to_id("▁is"), self.tokenizer.sp_model.piece_to_id("▁not"), ] self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], sp_encode) self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not<s> ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 2, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not<s> ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"]) # spaces are eaten by spm + our strip # make sure that the output after the extra id is the same as if # extra_id was not there input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) # spaces are eaten by spm even if not start def test_character_after_special_token(self): # Make sure that `tokenizer.tokenize` is similar to # adding the equivalent special token to the vocab input_ids = self.tokenizer.encode("Hey <s>I") self.assertEqual(input_ids, [3, 1, 157, 31, 2, 101, 3]) sp_encode = self.tokenizer.sp_model.encode("Hey .I") # the last token besides eos should be 100 offset self.assertEqual(input_ids[-2] - self.tokenizer.fairseq_offset, sp_encode[-1]) tokens = self.tokenizer.tokenize("<s>I") self.assertEqual(tokens, ["<s>", "I"]) input_ids = self.tokenizer.encode("Hello, <s>,") self.assertEqual(input_ids, [3, 1, 157, 87, 21, 4, 2, 4, 3]) tokens = self.tokenizer.tokenize("Hello, <s>,") self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","]) def test_special_tokens_strip(self): input_ids = self.tokenizer.encode(" <s> ,") self.assertEqual(input_ids, [3, 1, 2, 8, 4, 3]) tokens = self.tokenizer.tokenize(" <s> ,") # spaces are eaten by rstrip / lstrip + spm sp_model.encode(" ") = [] self.assertEqual(tokens, ["<s>", "▁", ","]) input_ids = self.tokenizer.encode("No <s> ▁He") self.assertEqual(input_ids, [3, 1, 285, 2, 157, 3]) tokens = self.tokenizer.tokenize("No <s> ▁He") self.assertEqual(tokens, ["▁No", "<s>", 
"▁He"]) # spaces are eaten by rstrip / lstrip
transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py/0
{ "file_path": "transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py", "repo_id": "transformers", "token_count": 15190 }
403
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SigLIP model. """ import inspect import os import tempfile import unittest import numpy as np import requests from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SiglipForImageClassification, SiglipModel, SiglipTextModel, SiglipVisionModel from transformers.models.siglip.modeling_siglip import SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SiglipProcessor class SiglipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return SiglipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = SiglipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = 
(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SiglipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = SiglipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=SiglipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SIGLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): for model_name in SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SiglipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class SiglipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, 
use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return SiglipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = SiglipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SiglipTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SiglipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipTextConfig, hidden_size=37) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from 
tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_training def test_training(self): pass # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_training_gradient_checkpointing def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_training_gradient_checkpointing_use_reentrant def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_training_gradient_checkpointing_use_reentrant_false def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip does not use inputs_embeds") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_to_base def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): for model_name in SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SiglipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class SiglipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = SiglipTextModelTester(parent, **text_kwargs) self.vision_model_tester = SiglipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training # Copied from tests.models.clip.test_modeling_clip.CLIPModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return SiglipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = SiglipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": False, } return config, inputs_dict @require_torch class SiglipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": SiglipModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipModelTester(self) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_retain_grad_hidden_states_attentions def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="SiglipModel does not have input/output embeddings") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_common_attributes def test_model_common_attributes(self): pass @unittest.skip(reason="SiglipModel does not support training") def test_training(self): pass @unittest.skip(reason="SiglipModel does not support training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipModel does not support training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipModel does not support training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest._create_and_check_torchscript with CLIP->Siglip def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Siglip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() 
loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_load_vision_text_config with CLIP->Siglip def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save SiglipConfig and check if we can load SiglipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = SiglipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save SiglipConfig and check if we can load SiglipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = SiglipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_from_pretrained with CLIPModel->SiglipModel, CLIP->SIGLIP def test_model_from_pretrained(self): for model_name in SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SiglipModel.from_pretrained(model_name) self.assertIsNotNone(model) class SiglipForImageClassificationModelTester(SiglipModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipForImageClassificationModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": SiglipForImageClassification} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = SiglipForImageClassificationModelTester(self) @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass 
@unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_model_common_attributes(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class SiglipModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) processor = SiglipProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of 2 cats", "a photo of 2 dogs"], images=image, padding="max_length", return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits_per_image = outputs.logits_per_image logits_per_text = outputs.logits_per_text # verify the logits self.assertEqual( logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-0.7567, -10.3354]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) # verify the probs probs = torch.sigmoid(logits_per_image) # these are the probabilities expected_probs = torch.tensor([[3.1937e-01, 3.2463e-05]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=1e-3))
transformers/tests/models/siglip/test_modeling_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_modeling_siglip.py", "repo_id": "transformers", "token_count": 11477 }
404
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SpeechT5 model. """ import copy import inspect import tempfile import unittest from transformers import SpeechT5Config, SpeechT5HifiGanConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5Processor, ) def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, decoder_input_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} if decoder_input_ids is not None: decoder_dict = {"decoder_input_ids": decoder_input_ids} else: decoder_dict = {"decoder_input_values": decoder_input_values} if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { **encoder_dict, **decoder_dict, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class SpeechT5ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, vocab_size=81, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.seq_length, self.hidden_size], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = 
self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5Model(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) @require_torch class SpeechT5ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5Model,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": SpeechT5ForSpeechToText, "feature-extraction": SpeechT5Model} if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass @require_torch class SpeechT5ForSpeechToTextTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, # speech is longer decoder_seq_length=7, is_training=False, 
hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size).clamp(2) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_ids = inputs_dict["decoder_input_ids"] result = model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.decoder_seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, 
attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) @require_torch class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToTextTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if 
config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( 
list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass def test_resize_embeddings_untied(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: 
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

    def test_resize_tokens_embeddings(self):
        original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # make sure that decoder_input_ids are resized
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        pass

    # training is not supported yet
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SpeechT5ForSpeechToTextIntegrationTests(unittest.TestCase):
    @cached_property
    def default_processor(self):
        return SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_generation_librispeech(self):
        model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
        model.to(torch_device)
        processor = self.default_processor

        input_speech = self._load_datasamples(1)

        input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device)

        generated_ids = model.generate(input_values)
        generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)

        EXPECTED_TRANSCRIPTIONS = [
            "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel"
        ]
        self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS)

    def test_generation_librispeech_batched(self):
        model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
        model.to(torch_device)
        processor = self.default_processor

        input_speech = self._load_datasamples(4)

        inputs = processor(audio=input_speech, return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)

        generated_ids = model.generate(input_values, attention_mask=attention_mask)
        generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        EXPECTED_TRANSCRIPTIONS = [
            "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel",
            "nor is mister quilter's manner less interesting than his matter",
            "he tells us that at this festive season of the year with christmas and rosebeaf looming before us"
            " similars drawn from eating and its results occur most readily to the mind",
            "he has grave doubts whether sir
frederick latin's work is really greek after all and can discover in it" " but little of rocky ithica", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS) @require_torch class SpeechT5ForTextToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=7, decoder_seq_length=1024, # speech is longer is_training=False, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, vocab_size=81, num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_ids=input_ids, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_ids, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False input_name = "input_ids" def setUp(self): self.model_tester = 
SpeechT5ForTextToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_decoder_model_past_with_large_inputs(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_determinism(self): pass @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet") def test_batching_equivalence(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_model_outputs_equivalence(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_save_load(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: 
https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)


@require_torch
@require_sentencepiece
@require_tokenizers
class SpeechT5ForTextToSpeechIntegrationTests(unittest.TestCase):
    @cached_property
    def default_model(self):
        return SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(torch_device)

    @cached_property
    def default_processor(self):
        return SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")

    @cached_property
    def default_vocoder(self):
        return SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(torch_device)

    def test_generation(self):
        model = self.default_model
        processor = self.default_processor

        input_text = "Mister Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."
        input_ids = processor(text=input_text, return_tensors="pt").input_ids.to(torch_device)
        speaker_embeddings = torch.zeros((1, 512), device=torch_device)

        # Generate speech and validate output dimensions
        set_seed(555)  # Ensure deterministic behavior
        generated_speech = model.generate_speech(input_ids, speaker_embeddings=speaker_embeddings)
        num_mel_bins = model.config.num_mel_bins
        self.assertEqual(
            generated_speech.shape[1], num_mel_bins, "Generated speech output has an unexpected number of mel bins."
) # Validate generation with additional kwargs using model.generate; # same method than generate_speech set_seed(555) # Reset seed for consistent results generated_speech_with_generate = model.generate( input_ids, attention_mask=None, speaker_embeddings=speaker_embeddings ) self.assertEqual( generated_speech_with_generate.shape, generated_speech.shape, "Shape mismatch between generate_speech and generate methods.", ) def test_one_to_many_generation(self): model = self.default_model processor = self.default_processor vocoder = self.default_vocoder input_text = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister quilter's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and rosebeaf looming before us", ] inputs = processor(text=input_text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) speaker_embeddings = torch.zeros((1, 512), device=torch_device) # Generate spectrograms set_seed(555) # Ensure deterministic behavior spectrograms, spectrogram_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], return_output_lengths=True, ) # Validate generated spectrogram dimensions expected_batch_size = len(input_text) num_mel_bins = model.config.num_mel_bins actual_batch_size, _, actual_num_mel_bins = spectrograms.shape self.assertEqual(actual_batch_size, expected_batch_size, "Batch size of generated spectrograms is incorrect.") self.assertEqual( actual_num_mel_bins, num_mel_bins, "Number of mel bins in batch generated spectrograms is incorrect." ) # Generate waveforms using the vocoder waveforms = vocoder(spectrograms) waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths] # Validate generation with integrated vocoder set_seed(555) # Reset seed for consistent results waveforms_with_vocoder, waveform_lengths_with_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=True, ) # Check consistency between waveforms generated with and without standalone vocoder self.assertTrue( torch.allclose(waveforms, waveforms_with_vocoder, atol=1e-8), "Mismatch in waveforms generated with and without the standalone vocoder.", ) self.assertEqual( waveform_lengths, waveform_lengths_with_vocoder, "Waveform lengths differ between standalone and integrated vocoder generation.", ) # Test generation consistency without returning lengths set_seed(555) # Reset seed for consistent results waveforms_with_vocoder_no_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=False, ) # Validate waveform consistency without length information self.assertTrue( torch.allclose(waveforms_with_vocoder_no_lengths, waveforms_with_vocoder, atol=1e-8), "Waveforms differ when generated with and without length information.", ) # Validate batch vs. 
single instance generation consistency for i, text in enumerate(input_text): inputs = processor(text=text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) set_seed(555) # Reset seed for consistent results spectrogram = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, ) # Check spectrogram shape consistency self.assertEqual( spectrogram.shape, spectrograms[i][: spectrogram_lengths[i]].shape, "Mismatch in spectrogram shape between batch and single instance generation.", ) # Generate and validate waveform for single instance waveform = vocoder(spectrogram) self.assertEqual( waveform.shape, waveforms[i][: waveform_lengths[i]].shape, "Mismatch in waveform shape between batch and single instance generation.", ) # Check waveform consistency with integrated vocoder set_seed(555) # Reset seed for consistent results waveform_with_integrated_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder, ) self.assertTrue( torch.allclose(waveform, waveform_with_integrated_vocoder, atol=1e-8), "Mismatch in waveform between standalone and integrated vocoder for single instance generation.", ) def test_batch_generation(self): model = self.default_model processor = self.default_processor vocoder = self.default_vocoder input_text = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister quilter's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and rosebeaf looming before us", ] inputs = processor(text=input_text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) set_seed(555) # Ensure deterministic behavior speaker_embeddings = torch.randn((len(input_text), 512), device=torch_device) # Generate spectrograms set_seed(555) # Reset seed for consistent results spectrograms, spectrogram_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], return_output_lengths=True, ) # Validate generated spectrogram dimensions expected_batch_size = len(input_text) num_mel_bins = model.config.num_mel_bins actual_batch_size, _, actual_num_mel_bins = spectrograms.shape self.assertEqual( actual_batch_size, expected_batch_size, "Batch size of generated spectrograms is incorrect.", ) self.assertEqual( actual_num_mel_bins, num_mel_bins, "Number of mel bins in batch generated spectrograms is incorrect.", ) # Generate waveforms using the vocoder waveforms = vocoder(spectrograms) waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths] # Validate generation with integrated vocoder set_seed(555) # Reset seed for consistent results waveforms_with_vocoder, waveform_lengths_with_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=True, ) # Check consistency between waveforms generated with and without standalone vocoder self.assertTrue( torch.allclose(waveforms, waveforms_with_vocoder, atol=1e-8), "Mismatch in waveforms generated with and without the standalone vocoder.", ) self.assertEqual( waveform_lengths, waveform_lengths_with_vocoder, "Waveform lengths differ between standalone and integrated vocoder generation.", ) # Test generation consistency without returning lengths set_seed(555) # Reset seed for 
consistent results waveforms_with_vocoder_no_lengths = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=speaker_embeddings, attention_mask=inputs["attention_mask"], vocoder=vocoder, return_output_lengths=False, ) # Validate waveform consistency without length information self.assertTrue( torch.allclose(waveforms_with_vocoder_no_lengths, waveforms_with_vocoder, atol=1e-8), "Waveforms differ when generated with and without length information.", ) # Validate batch vs. single instance generation consistency for i, text in enumerate(input_text): inputs = processor(text=text, padding="max_length", max_length=128, return_tensors="pt").to(torch_device) current_speaker_embedding = speaker_embeddings[i].unsqueeze(0) set_seed(555) # Reset seed for consistent results spectrogram = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=current_speaker_embedding, ) # Check spectrogram shape consistency self.assertEqual( spectrogram.shape, spectrograms[i][: spectrogram_lengths[i]].shape, "Mismatch in spectrogram shape between batch and single instance generation.", ) # Generate and validate waveform for single instance waveform = vocoder(spectrogram) self.assertEqual( waveform.shape, waveforms[i][: waveform_lengths[i]].shape, "Mismatch in waveform shape between batch and single instance generation.", ) # Check waveform consistency with integrated vocoder set_seed(555) # Reset seed for consistent results waveform_with_integrated_vocoder = model.generate_speech( input_ids=inputs["input_ids"], speaker_embeddings=current_speaker_embedding, vocoder=vocoder, ) self.assertTrue( torch.allclose(waveform, waveform_with_integrated_vocoder, atol=1e-8), "Mismatch in waveform between standalone and integrated vocoder for single instance generation.", ) @require_torch class SpeechT5ForSpeechToSpeechTester: def __init__( self, parent, batch_size=13, encoder_seq_length=1024, # speech is longer decoder_seq_length=1024, is_training=False, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, intermediate_size=4, conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, vocab_size=81, num_mel_bins=20, reduction_factor=2, speech_decoder_postnet_layers=2, speech_decoder_postnet_units=32, speech_decoder_prenet_units=32, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.reduction_factor = reduction_factor self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_prenet_units = speech_decoder_prenet_units def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.encoder_seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.encoder_seq_length]) decoder_input_values = floats_tensor([self.batch_size, self.decoder_seq_length, self.num_mel_bins], scale=1.0) 
decoder_attention_mask = random_attention_mask([self.batch_size, self.decoder_seq_length]) config = self.get_config() inputs_dict = prepare_inputs_dict( config, input_values=input_values, decoder_input_values=decoder_input_values, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return SpeechT5Config( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, reduction_factor=self.reduction_factor, speech_decoder_postnet_layers=self.speech_decoder_postnet_layers, speech_decoder_postnet_units=self.speech_decoder_postnet_units, speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) def create_and_check_model_forward(self, config, inputs_dict): model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] attention_mask = inputs_dict["attention_mask"] decoder_input_values = inputs_dict["decoder_input_values"] result = model(input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values) self.parent.assertEqual( result.spectrogram.shape, (self.batch_size, self.decoder_seq_length * self.reduction_factor, self.num_mel_bins), ) @require_torch class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () all_generative_model_classes = (SpeechT5ForSpeechToSpeech,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False input_name = "input_values" def setUp(self): self.model_tester = SpeechT5ForSpeechToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_decoder_model_past_with_large_inputs(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_determinism(self): pass @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet") def test_batching_equivalence(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = 
getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_seq_length ) subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths( encoder_key_length ) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) 
signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_values", "attention_mask", "decoder_input_values", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "feature_projection.projection.weight", "feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet def test_model_outputs_equivalence(self): pass def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients pass # skipped 
because there is always dropout in SpeechT5SpeechDecoderPrenet def test_save_load(self): pass @slow def test_torchscript_output_attentions(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_output_hidden_state(self): # disabled because this model doesn't have decoder_input_ids pass @slow def test_torchscript_simple(self): # disabled because this model doesn't have decoder_input_ids pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers @slow class SpeechT5ForSpeechToSpeechIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return SpeechT5Processor.from_pretrained("microsoft/speecht5_vc") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc") model.to(torch_device) processor = self.default_processor input_speech = self._load_datasamples(1) input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device) speaker_embeddings = torch.zeros((1, 512), device=torch_device) generated_speech = model.generate_speech(input_values, speaker_embeddings=speaker_embeddings) self.assertEqual(generated_speech.shape[1], model.config.num_mel_bins) self.assertGreaterEqual(generated_speech.shape[0], 300) self.assertLessEqual(generated_speech.shape[0], 310) class SpeechT5HifiGanTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, num_mel_bins=20, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.num_mel_bins = num_mel_bins def prepare_config_and_inputs(self): input_values = floats_tensor([self.seq_length, self.num_mel_bins], scale=1.0) config = self.get_config() return config, input_values def get_config(self): return SpeechT5HifiGanConfig( model_in_dim=self.num_mel_bins, upsample_initial_channel=32, ) def create_and_check_model(self, config, input_values): model = SpeechT5HifiGan(config=config).to(torch_device).eval() result = model(input_values) self.parent.assertEqual(result.shape, (self.seq_length * 
256,)) def prepare_config_and_inputs_for_common(self): config, input_values = self.prepare_config_and_inputs() inputs_dict = {"spectrogram": input_values} return config, inputs_dict @require_torch class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SpeechT5HifiGan,) if is_torch_available() else () test_torchscript = False test_pruning = False test_resize_embeddings = False test_resize_position_embeddings = False test_head_masking = False test_mismatched_shapes = False test_missing_keys = False test_model_parallel = False is_encoder_decoder = False has_attentions = False input_name = "spectrogram" def setUp(self): self.model_tester = SpeechT5HifiGanTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5HifiGanConfig) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_from_and_save_pretrained_subfolder() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "spectrogram", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # this model does not output hidden states def test_hidden_states_output(self): pass # skip def test_initialization(self): pass # this model has no inputs_embeds def test_inputs_embeds(self): pass # this model has no input embeddings def test_model_common_attributes(self): pass # skip as this model doesn't support all arguments tested def test_model_outputs_equivalence(self): pass # this model does not output hidden states def test_retain_grad_hidden_states_attentions(self): pass # skip because it fails on automapping of SpeechT5HifiGanConfig def test_save_load_fast_init_from_base(self): pass # skip because it fails on automapping of SpeechT5HifiGanConfig def test_save_load_fast_init_to_base(self): pass def test_batched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() batched_inputs = inputs["spectrogram"].unsqueeze(0).repeat(2, 1, 1) with torch.no_grad(): batched_outputs = model(batched_inputs.to(torch_device)) self.assertEqual( batched_inputs.shape[0], batched_outputs.shape[0], msg="Got different batch dims for input and output" ) def test_unbatched_inputs_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(inputs["spectrogram"].to(torch_device)) self.assertTrue(outputs.dim() == 1, msg="Got un-batched inputs but batched output")
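# NOTE (editorial addition, not part of the original test suite): the
# SpeechT5ForTextToSpeechIntegrationTests above exercise text-to-speech with the
# "microsoft/speecht5_tts" checkpoint and the "microsoft/speecht5_hifigan"
# vocoder. The helper below is a minimal, hedged usage sketch of that same path;
# the function name is illustrative, and the all-zero 512-dim speaker embedding
# is only the placeholder the tests themselves use.
def _example_text_to_speech_usage():
    import torch

    from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

    input_ids = processor(text="Hello, world.", return_tensors="pt").input_ids
    speaker_embeddings = torch.zeros((1, 512))  # placeholder speaker embedding, as in the tests above

    # With a vocoder, generate_speech returns a waveform directly; without one it
    # returns a (num_frames, num_mel_bins) spectrogram, as the tests above check.
    return model.generate_speech(input_ids, speaker_embeddings, vocoder=vocoder)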
transformers/tests/models/speecht5/test_modeling_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_modeling_speecht5.py", "repo_id": "transformers", "token_count": 35195 }
405
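# NOTE (editorial addition, not part of the original file above): the
# SpeechT5ForSpeechToTextIntegrationTests exercise speech recognition with the
# "microsoft/speecht5_asr" checkpoint on the dummy LibriSpeech dataset. The
# helper below is a hedged, minimal sketch of that flow; the function name and
# the choice of a single sample are illustrative.
def _example_speech_to_text_usage():
    from datasets import load_dataset

    from transformers import SpeechT5ForSpeechToText, SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
    model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")

    # Same 16 kHz dummy LibriSpeech split the integration tests load.
    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    audio = [ds.sort("id")[0]["audio"]["array"]]

    input_values = processor(audio=audio, return_tensors="pt").input_values
    generated_ids = model.generate(input_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)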
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SwiftFormer model. """ import copy import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SwiftFormerModelTester: def __init__( self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=3, layer_depths=[1, 1, 1, 1], embed_dims=[16, 16, 32, 32], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_labels = num_labels self.image_size = image_size self.layer_depths = layer_depths self.embed_dims = embed_dims def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwiftFormerConfig( depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, ) def create_and_check_model(self, config, pixel_values, labels): model = SwiftFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) result = 
model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): (config, pixel_values, labels) = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SwiftFormer does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = SwiftFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SwiftFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="SwiftFormer does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 8 self.assertEqual(len(hidden_states), expected_num_stages) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(hidden_states)): self.assertEqual( hidden_states[i].shape, torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ), ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): def _config_zero_init(config): configs_no_init = 
copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class SwiftFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/swiftformer/test_modeling_swiftformer.py/0
{ "file_path": "transformers/tests/models/swiftformer/test_modeling_swiftformer.py", "repo_id": "transformers", "token_count": 4796 }
406
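A hedged usage sketch, not part of the dataset record above: how the classification head exercised by SwiftFormerModelIntegrationTest is typically driven outside the test harness. The checkpoint name, the fixture image path, and the (1, 1000) logits shape are taken from the test itself; everything else is a minimal illustration rather than the canonical recipe.

import torch
from PIL import Image

from transformers import SwiftFormerForImageClassification, ViTImageProcessor

# Checkpoint and image path mirror SwiftFormerModelIntegrationTest above.
processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) ImageNet-1k logits, as the test asserts

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])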
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TVLT image processor. """ import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvltImageProcessor def prepare_video(image_processor_tester, width=10, height=10, numpify=False, torchify=False): """This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors.""" video = [] for i in range(image_processor_tester.num_frames): video.append(np.random.randint(255, size=(image_processor_tester.num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] if torchify: video = [torch.from_numpy(frame) for frame in video] return video def prepare_video_inputs(image_processor_tester, equal_resolution=False, numpify=False, torchify=False): """This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. One can specify whether the videos are of the same resolution or not. 
""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for i in range(image_processor_tester.batch_size): if equal_resolution: width = height = image_processor_tester.max_resolution else: width, height = np.random.choice( np.arange(image_processor_tester.min_resolution, image_processor_tester.max_resolution), 2 ) video = prepare_video( image_processor_tester=image_processor_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs class TvltImageProcessorTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_frames=4, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_center_crop=True, crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_center_crop = do_center_crop self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class TvltImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvltImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = TvltImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "do_center_crop")) self.assertTrue(hasattr(image_processor, "size")) def test_call_pil(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], 
self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy_4_channels(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processor( video_inputs[0], return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1 ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processor( video_inputs, return_tensors="pt", input_data_format="channels_first", image_mean=0, image_std=1 ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processor image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input encoded_videos = image_processor(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_videos = image_processor(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
transformers/tests/models/tvlt/test_image_processor_tvlt.py/0
{ "file_path": "transformers/tests/models/tvlt/test_image_processor_tvlt.py", "repo_id": "transformers", "token_count": 5010 }
407
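A hedged sketch that only mirrors test_call_pil above: a single video, represented as a list of PIL frames, is passed through TvltImageProcessor and comes back as a 5-D (batch, frames, channels, height, width) tensor. The size/crop_size values copy the tester defaults; the random frames are placeholders for real video data.

import numpy as np
from PIL import Image

from transformers import TvltImageProcessor

# Four random RGB frames stand in for a real video clip.
num_frames, height, width = 4, 32, 32
video = [
    Image.fromarray(np.random.randint(0, 255, size=(height, width, 3), dtype=np.uint8))
    for _ in range(num_frames)
]

# size / crop_size follow TvltImageProcessorTester's defaults above.
processor = TvltImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
pixel_values = processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected per the test: torch.Size([1, 4, 3, 18, 18])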
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow ViTMAE model. """ from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class TFViTMAEModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, 
decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def create_and_check_model(self, config, pixel_values, labels): model = TFViTMAEModel(config=config) result = model(pixel_values, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = TFViTMAEForPreTraining(config) result = model(pixel_values, training=False) # expected sequence length = num_patches num_patches = (self.image_size // self.patch_size) ** 2 expected_num_channels = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images config.num_channels = 1 model = TFViTMAEForPreTraining(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, training=False) expected_num_channels = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, pixel_values, labels) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TFViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def test_keyword_and_dict_args(self): # make the mask reproducible np.random.seed(2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() num_patches = int((config.image_size // config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs, noise=noise) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) outputs_keywords = model(**inputs_keywords, noise=noise) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def test_numpy_arrays_inputs(self): # make the mask reproducible np.random.seed(2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() num_patches = int((config.image_size // config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) def prepare_numpy_arrays(inputs_dict): inputs_np_dict = {} for k, v in inputs_dict.items(): if tf.is_tensor(v): inputs_np_dict[k] = v.numpy() else: inputs_np_dict[k] = np.array(k) return inputs_np_dict for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) inputs_np = prepare_numpy_arrays(inputs) output_for_dict_input = model(inputs_np, noise=noise) output_for_kw_input = model(**inputs_np, noise=noise) 
self.assert_outputs_same(output_for_dict_input, output_for_kw_input) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): # make masks reproducible np.random.seed(2) num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) tf_noise = tf.constant(noise) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument tf_inputs_dict["noise"] = tf_noise super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def test_keras_save_load(self): # make mask reproducible np.random.seed(2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } num_patches = int((config.image_size // config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) noise = tf.convert_to_tensor(noise) inputs_dict.update({"noise": noise}) for main_layer_class in tf_main_layer_classes: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) model = keras.models.load_model(filepath, custom_objects={main_layer_class.__name__: main_layer_class}) assert isinstance(model, keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test @slow def test_save_load(self): # make mask reproducible np.random.seed(2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() num_patches = int((config.image_size // config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: model = model_class(config) model_input = self._prepare_for_class(inputs_dict, model_class) outputs = model(model_input, noise=noise) if model_class.__name__ == "TFViTMAEModel": out_2 = outputs.last_hidden_state.numpy() out_2[np.isnan(out_2)] = 0 else: out_2 = outputs.logits.numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model = model_class.from_pretrained(tmpdirname) after_outputs = model(model_input, noise=noise) if model_class.__name__ == "TFViTMAEModel": out_1 = 
after_outputs["last_hidden_state"].numpy() out_1[np.isnan(out_1)] = 0 else: out_1 = after_outputs["logits"].numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def test_save_load_config(self): # make mask reproducible np.random.seed(2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() num_patches = int((config.image_size // config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: model = model_class(config) model_inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(model_inputs, noise=noise) model_config = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(model_config) new_model = model_class.from_config(model.get_config()) # make sure it also accepts a normal config _ = model_class.from_config(model.config) _ = new_model(model_inputs) # Build model new_model.set_weights(model.get_weights()) after_outputs = new_model(model_inputs, noise=noise) self.assert_outputs_same(after_outputs, outputs) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_determinism(self): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""") def test_model_outputs_equivalence(self): pass @slow def test_model_from_pretrained(self): model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224") self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFViTMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None @slow def test_inference_for_pretraining(self): # make random mask reproducible across the PT and TF model np.random.seed(2) model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) noise = np.random.uniform(size=(1, num_patches)) # forward pass outputs = model(**inputs, noise=noise) # verify the logits expected_shape = tf.convert_to_tensor([1, 196, 768]) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
transformers/tests/models/vit_mae/test_modeling_tf_vit_mae.py/0
{ "file_path": "transformers/tests/models/vit_mae/test_modeling_tf_vit_mae.py", "repo_id": "transformers", "token_count": 7979 }
408
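A hedged sketch, not part of the record above: the fixed-noise trick the TF ViTMAE tests rely on, shown as standalone inference. Because TFViTMAEForPreTraining samples a random mask on every forward pass, passing an explicit noise tensor is what makes two calls comparable. The checkpoint, the fixture image, the num_patches arithmetic, and the noise keyword all come from the tests above; the equality check at the end is only an illustration.

import numpy as np
import tensorflow as tf
from PIL import Image

from transformers import TFViTMAEForPreTraining, ViTImageProcessor, ViTMAEConfig

model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")

# (224 // 16) ** 2 == 196 patches for the default ViTMAEConfig, as in the integration test.
config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2
noise = np.random.uniform(size=(1, num_patches))

# Same noise => same mask => reproducible logits across calls.
out_1 = model(**inputs, noise=noise).logits
out_2 = model(**inputs, noise=noise).logits
tf.debugging.assert_near(out_1, out_2, atol=1e-4)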
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class Wav2Vec2FeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Wav2Vec2FeatureExtractor def setUp(self): self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test not batched input encoded_sequences_1 = 
feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_zero_mean_unit_variance_normalization_np(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np") input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) lengths = range(800, 1400, 200) speech_inputs = [floats_list((1, x))[0] for x in lengths] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, max_length=max_length, padding=padding) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) 
self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200)) @require_torch def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) @slow @require_torch def test_pretrained_checkpoints_are_set_correctly(self): # this test makes sure that models that are using # group norm don't have their feature extractor return the # attention_mask for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: config = Wav2Vec2Config.from_pretrained(model_id) feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
transformers/tests/models/wav2vec2/test_feature_extraction_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", "repo_id": "transformers", "token_count": 4411 }
409
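A hedged sketch restating what the zero-mean / unit-variance assertions in Wav2Vec2FeatureExtractionTest check, written as a standalone snippet. The feature-extractor arguments copy the tester defaults; the random waveforms are placeholders for real 16 kHz speech.

import numpy as np

from transformers import Wav2Vec2FeatureExtractor

# Same arguments as Wav2Vec2FeatureExtractionTester.prepare_feat_extract_dict above.
feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

# Three utterances of different lengths, padded to the longest one.
speech_inputs = [np.random.rand(length).astype(np.float32) for length in (800, 1000, 1200)]
batch = feature_extractor(speech_inputs, sampling_rate=16000, padding="longest", return_tensors="np")

# Only the un-padded region of each example is normalized, which is what
# _check_zero_mean_unit_variance asserts in the test.
first = batch.input_values[0][:800]
print(abs(first.mean()))      # ~0 after normalization
print(abs(first.var() - 1))   # ~0, i.e. unit variance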
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch WavLM model. """ import math import unittest import pytest from datasets import load_dataset from transformers import WavLMConfig, is_torch_available from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2FeatureExtractor, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, ) class WavLMModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, tdnn_dim=(32, 32), tdnn_kernel=(3, 3), tdnn_dilation=(1, 1), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) 
config = self.get_config() return config, input_values, attention_mask def get_config(self): return WavLMConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = WavLMModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = WavLMModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = WavLMForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = WavLMForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, 
dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = WavLMForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = WavLMForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = WavLMForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class WavLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (WavLMForCTC, WavLMModel, WavLMForAudioFrameClassification, WavLMForSequenceClassification, WavLMForXVector) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": WavLMForSequenceClassification, "automatic-speech-recognition": WavLMForCTC, "feature-extraction": WavLMModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = WavLMModelTester(self) self.config_tester = ConfigTester(self, config_class=WavLMConfig, hidden_size=37) def test_config(self): 
self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # WavLM has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # WavLM cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # WavLM has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass # WavLM uses PyTorch's multi-head-attention class # and thus can't retain gradients on attentions def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "rel_attn_embed", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 
1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) @unittest.skip(reason="Feed forward chunking is not implemented for WavLM") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus") self.assertIsNotNone(model) @require_torch @require_torchaudio @slow class WavLMModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_base(self): model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus").to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "microsoft/wavlm-base-plus", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs = feature_extractor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): hidden_states_slice = ( model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu() ) EXPECTED_HIDDEN_STATES_SLICE = torch.tensor( [[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]] ) self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2)) def test_inference_large(self): model = WavLMModel.from_pretrained("microsoft/wavlm-large").to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "microsoft/wavlm-large", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs = feature_extractor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): hidden_states_slice = ( model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu() ) EXPECTED_HIDDEN_STATES_SLICE = torch.tensor( [[[0.2122, 0.0500], [0.2118, 0.0563]], [[0.1353, 0.1818], [0.2453, 0.0595]]] ) self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2)) def test_inference_diarization(self): model = WavLMForAudioFrameClassification.from_pretrained("microsoft/wavlm-base-plus-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", 
padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.9566, -8.6554], [-5.7137, -8.9386], [-5.7906, -7.0973], [-5.7829, -5.9999]], [[-5.2086, -7.7878], [-4.8890, -7.9312], [-4.2004, -3.9101], [-5.4480, -4.6932]], [[-4.6105, -6.7178], [-5.1930, -6.1635], [-2.6228, -4.1123], [-2.7646, -3.1576]], [[-4.4477, -7.9206], [-3.9339, -7.3707], [-4.9528, -4.8242], [-3.6921, -2.9687]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 258) self.assertEqual(labels[0, :, 1].sum(), 647) self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1) cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9787, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.5064, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.4780, 3) self.assertAlmostEqual(outputs.loss.item(), 18.4154, 2)
transformers/tests/models/wavlm/test_modeling_wavlm.py/0
{ "file_path": "transformers/tests/models/wavlm/test_modeling_wavlm.py", "repo_id": "transformers", "token_count": 10880 }
410
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMModel, TFXLMWithLMHeadModel, XLMConfig, ) class TFXLMModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = True self.scope = None self.bos_token_id = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, ) return ( config, input_ids, token_type_ids, input_lengths, 
sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def create_and_check_xlm_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMModel(config=config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_xlm_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMWithLMHeadModel(config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} outputs = model(inputs) result = outputs self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_xlm_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_xlm_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFXLMForSequenceClassification(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_xlm_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = TFXLMForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_xlm_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = TFXLMForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": 
input_lengths, } return config, inputs_dict @require_tf class TFXLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple, TFXLMForTokenClassification, TFXLMForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFXLMWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable pipeline_model_mapping = ( { "feature-extraction": TFXLMModel, "fill-mask": TFXLMWithLMHeadModel, "question-answering": TFXLMForQuestionAnsweringSimple, "text-classification": TFXLMForSequenceClassification, "text-generation": TFXLMWithLMHeadModel, "token-classification": TFXLMForTokenClassification, "zero-shot": TFXLMForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = TFXLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_xlm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*config_and_inputs) def test_xlm_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs) def test_xlm_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*config_and_inputs) def test_xlm_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFXLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFXLMModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_xlm_mlm_en_2048(self): model = TFXLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-mlm-en-2048") input_ids = tf.convert_to_tensor([[14, 447]], dtype=tf.int32) # the president expected_output_ids = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. 
Model might # just not be made for auto-regressive inference output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
transformers/tests/models/xlm/test_modeling_tf_xlm.py/0
{ "file_path": "transformers/tests/models/xlm/test_modeling_tf_xlm.py", "repo_id": "transformers", "token_count": 6451 }
411
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) # Legacy behavior outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = 
text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) # Forcing to get all results with `top_k=None` # This is NOT the legacy format outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
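# A doubly-nested input like [[text, text_pair]] is rejected with a ValueError below, while the triply-nested legacy form further down is still accepted for backward compatibility.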
invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
transformers/tests/pipelines/test_pipelines_text_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text_classification.py", "repo_id": "transformers", "token_count": 3566 }
412
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter, value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter, value in args.__dict__.items()])}""" # fail loudly if the launched training command exits with a non-zero status try: subprocess.run(cmd, shell=True, check=True) except Exception as e: logger.error(e) raise if __name__ == "__main__": main()
transformers/tests/sagemaker/scripts/pytorch/run_ddp.py/0
{ "file_path": "transformers/tests/sagemaker/scripts/pytorch/run_ddp.py", "repo_id": "transformers", "token_count": 694 }
413
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from parameterized import parameterized from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax if is_vision_available(): import PIL.Image from transformers.image_transforms import ( center_crop, center_to_corners_format, convert_to_rgb, corners_to_center_format, flip_channel_order, get_resize_output_image_size, id_to_rgb, normalize, pad, resize, rgb_to_id, to_channel_dimension_format, to_pil_image, ) def get_random_image(height, width, num_channels=3, channels_first=True): shape = (num_channels, height, width) if channels_first else (height, width, num_channels) random_array = np.random.randint(0, 256, shape, dtype=np.uint8) return random_array @require_vision class ImageTransformsTester(unittest.TestCase): @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float64), ("numpy_int_channels_first", (3, 4, 5), np.int32), ("numpy_uint_channels_first", (3, 4, 5), np.uint8), ] ) @require_vision def test_to_pil_image(self, name, image_shape, dtype): image = np.random.randint(0, 256, image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) @parameterized.expand( [ ("numpy_float_channels_first", (3, 4, 5), np.float32), ("numpy_float_channels_first", (3, 4, 5), np.float64), ("numpy_float_channels_last", (4, 5, 3), np.float32), ("numpy_float_channels_last", (4, 5, 3), np.float64), ] ) @require_vision def test_to_pil_image_from_float(self, name, image_shape, dtype): image = np.random.rand(*image_shape).astype(dtype) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # make sure image is correctly rescaled self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0) # Make sure that an exception is raised if image is not in [0, 1] image = np.random.randn(*image_shape).astype(dtype) with self.assertRaises(ValueError): to_pil_image(image) @require_vision def test_to_pil_image_from_mask(self): # Make sure binary mask remains a binary mask image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32) pil_image 
= to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) np_img = np.asarray(pil_image) self.assertTrue(np_img.min() == 0) self.assertTrue(np_img.max() == 1) @require_tf def test_to_pil_image_from_tensorflow(self): # channels_first image = tf.random.uniform((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels_last image = tf.random.uniform((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_torch def test_to_pil_image_from_torch(self): # channels first image = torch.rand((3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channels last image = torch.rand((4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) @require_flax def test_to_pil_image_from_jax(self): key = jax.random.PRNGKey(0) # channel first image = jax.random.uniform(key, (3, 4, 5)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) # channel last image = jax.random.uniform(key, (4, 5, 3)) pil_image = to_pil_image(image) self.assertIsInstance(pil_image, PIL.Image.Image) self.assertEqual(pil_image.size, (5, 4)) def test_to_channel_dimension_format(self): # Test that function doesn't reorder if channel dim matches the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) # Test that function reorders if channel dim doesn't match the input. image = np.random.rand(3, 4, 5) image = to_channel_dimension_format(image, "channels_last") self.assertEqual(image.shape, (4, 5, 3)) image = np.random.rand(4, 5, 3) image = to_channel_dimension_format(image, "channels_first") self.assertEqual(image.shape, (3, 4, 5)) # Can pass in input_data_format and works if data format is ambiguous or unknown. image = np.random.rand(4, 5, 6) image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last") self.assertEqual(image.shape, (6, 4, 5)) def test_get_resize_output_image_size(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test the output size defaults to (x, x) if an int is given. self.assertEqual(get_resize_output_image_size(image, 10), (10, 10)) self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10)) self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10)) # Test the output size is the same as the input if a two element tuple/list is given. 
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20)) self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20)) self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20)) # To match pytorch behaviour, max_size is only relevant if size is an int self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20)) # Test output size = (int(size * height / width), size) if size is an int and height > width image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20)) # Test output size = (size, int(size * width / height)) if size is an int and width <= height image = np.random.randint(0, 256, (3, 40, 50)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25)) # Test size is resized if longer size > max_size image = np.random.randint(0, 256, (3, 50, 40)) self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17)) # Test output size = (int(size * height / width), size) if size is an int and height > width and # input has 4 channels image = np.random.randint(0, 256, (4, 50, 40)) self.assertEqual( get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"), (25, 20), ) # Test correct channel dimension is returned if output size if height == 3 # Defaults to input format - channels first image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 3, 20)) # Defaults to input format - channels last image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20)) self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (3, 18, 97)) resized_image = resize(image, (3, 20), data_format="channels_last") self.assertEqual(resized_image.shape, (3, 20, 3)) image = np.random.randint(0, 256, (18, 97, 3)) resized_image = resize(image, (3, 20), data_format="channels_first") self.assertEqual(resized_image.shape, (3, 3, 20)) def test_resize(self): image = np.random.randint(0, 256, (3, 224, 224)) # Check the channel order is the same by default resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) # Check channel order is changed if specified resized_image = resize(image, (30, 40), data_format="channels_last") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (30, 40, 3)) # Check PIL.Image.Image is returned if return_numpy=False resized_image = resize(image, (30, 40), return_numpy=False) self.assertIsInstance(resized_image, PIL.Image.Image) # PIL size is in (width, height) order self.assertEqual(resized_image.size, (40, 30)) # Check an image with float values between 0-1 is returned with values in this range image = np.random.rand(3, 224, 224) resized_image = resize(image, (30, 40)) self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (3, 30, 40)) self.assertTrue(np.all(resized_image >= 0)) self.assertTrue(np.all(resized_image <= 1)) # Check that an image with 4 channels is resized correctly image = np.random.randint(0, 256, (4, 224, 224)) resized_image = resize(image, (30, 40), input_data_format="channels_first") self.assertIsInstance(resized_image, np.ndarray) self.assertEqual(resized_image.shape, (4, 30, 40)) def test_normalize(self): image = 
np.random.randint(0, 256, (224, 224, 3)) / 255 # Test that exception is raised if inputs are incorrect # Not a numpy array image with self.assertRaises(ValueError): normalize(5, 5, 5) # Number of mean values != number of channels with self.assertRaises(ValueError): normalize(image, mean=(0.5, 0.6), std=1) # Number of std values != number of channels with self.assertRaises(ValueError): normalize(image, mean=1, std=(0.5, 0.6)) # Test result is correct - output data format is channels_first and normalization # correctly computed mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).transpose((2, 0, 1)) normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first") self.assertIsInstance(normalized_image, np.ndarray) self.assertEqual(normalized_image.shape, (3, 224, 224)) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test image with 4 channels is normalized correctly image = np.random.randint(0, 256, (224, 224, 4)) / 255 mean = (0.5, 0.6, 0.7, 0.8) std = (0.1, 0.2, 0.3, 0.4) expected_image = (image - mean) / std self.assertTrue( np.allclose( normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6 ) ) # Test float32 image input keeps float32 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = ((image - mean) / std).astype(np.float32) normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test float16 image input keeps float16 dtype image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255 mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) # The mean and std are cast to match the dtype of the input image cast_mean = np.array(mean, dtype=np.float16) cast_std = np.array(std, dtype=np.float16) expected_image = (image - cast_mean) / cast_std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float16) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) # Test int image input is converted to float32 image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8) mean = (0.5, 0.6, 0.7) std = (0.1, 0.2, 0.3) expected_image = (image.astype(np.float32) - mean) / std normalized_image = normalize(image, mean=mean, std=std) self.assertEqual(normalized_image.dtype, np.float32) self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6)) def test_center_crop(self): image = np.random.randint(0, 256, (3, 224, 224)) # Test that exception is raised if inputs are incorrect with self.assertRaises(ValueError): center_crop(image, 10) # Test result is correct - output data format is channels_first and center crop # correctly computed expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0) cropped_image = center_crop(image, (120, 60), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (120, 60, 3)) self.assertTrue(np.allclose(cropped_image, expected_image)) # Test that image is padded with zeros if crop size is larger than image size expected_image = np.zeros((300, 260, 3)) expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0)) cropped_image = center_crop(image, (300, 260), data_format="channels_last") self.assertIsInstance(cropped_image, np.ndarray) self.assertEqual(cropped_image.shape, (300, 260, 3)) 
self.assertTrue(np.allclose(cropped_image, expected_image)) # Test image with 4 channels is cropped correctly image = np.random.randint(0, 256, (224, 224, 4)) expected_image = image[52:172, 82:142, :] self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image)) def test_center_to_corners_format(self): bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center)) def test_corners_to_center_format(self): bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]]) expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]]) self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected)) # Check that the function and inverse function are inverse of each other self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners)) def test_rgb_to_id(self): # test list input rgb = [125, 4, 255] self.assertEqual(rgb_to_id(rgb), 16712829) # test numpy array input color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) self.assertTrue(np.allclose(rgb_to_id(color), expected)) def test_id_to_rgb(self): # test int input self.assertEqual(id_to_rgb(16712829), [125, 4, 255]) # test array input id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]]) color = np.array( [ [ [213, 54, 165], [88, 207, 39], [156, 108, 128], ], [ [183, 194, 46], [137, 58, 88], [114, 131, 233], ], ] ) self.assertTrue(np.allclose(id_to_rgb(id_array), color)) def test_pad(self): # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) # fmt: on # Test that exception is raised if unknown padding mode is specified with self.assertRaises(ValueError): pad(image, 10, mode="unknown") # Test that exception is raised if invalid padding is specified with self.assertRaises(ValueError): # Cannot pad on channel dimension pad(image, (5, 10, 10)) # Test image is padded equally on all sides is padding is an int # fmt: off expected_image = np.array([ [[0, 0, 0, 0], [0, 0, 1, 0], [0, 2, 3, 0], [0, 0, 0, 0]], ]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, 1))) # Test the left and right of each axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 2, 3, 0], [0, 0, 0, 0, 0]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, (2, 1)))) # Test only one axis is padded (pad_left, pad_right) # fmt: off expected_image = np.array([[ [9, 9], [9, 9], [0, 1], [2, 3], [9, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9))) # Test padding with a constant value # fmt: off expected_image = np.array([[ [8, 8, 0, 1, 9], [8, 8, 2, 3, 9], [8, 8, 7, 7, 9], [8, 8, 7, 7, 9] ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9))))) # fmt: off image = np.array([[ [0, 1, 2], [3, 4, 5], [6, 7, 8], ]]) # fmt: on # Test padding with PaddingMode.REFLECT # fmt: off expected_image = np.array([[ [2, 1, 0, 1, 2, 1], [5, 4, 3, 4, 5, 4], [8, 7, 6, 7, 8, 7], [5, 4, 3, 4, 
5, 4], [2, 1, 0, 1, 2, 1], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect"))) # Test padding with PaddingMode.REPLICATE # fmt: off expected_image = np.array([[ [0, 0, 0, 1, 2, 2], [3, 3, 3, 4, 5, 5], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], [6, 6, 6, 7, 8, 8], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate"))) # Test padding with PaddingMode.SYMMETRIC # fmt: off expected_image = np.array([[ [1, 0, 0, 1, 2, 2], [4, 3, 3, 4, 5, 5], [7, 6, 6, 7, 8, 8], [7, 6, 6, 7, 8, 8], [4, 3, 3, 4, 5, 5], ]]) # fmt: on self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric"))) # Test we can specify the output data format # Test padding with PaddingMode.REFLECT # fmt: off image = np.array([[ [0, 1], [2, 3], ]]) expected_image = np.array([ [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]], [[0], [1], [0], [1], [0]], [[2], [3], [2], [3], [2]] ]) # fmt: on self.assertTrue( np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last")) ) # Test we can pad on an image with 2 channels # fmt: off image = np.array([ [[0, 1], [2, 3]], ]) expected_image = np.array([ [[0, 0], [0, 1], [2, 3]], [[0, 0], [0, 0], [0, 0]], ]) # fmt: on self.assertTrue( np.allclose( expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last") ) ) @require_vision def test_convert_to_rgb(self): # Test that an RGBA image is converted to RGB image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "RGBA") self.assertEqual(pil_image.size, (2, 1)) # For the moment, numpy images are returned as is rgb_image = convert_to_rgb(image) self.assertEqual(rgb_image.shape, (1, 2, 4)) self.assertTrue(np.allclose(rgb_image, image)) # And PIL images are converted rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8))) # Test that a grayscale image is converted to RGB image = np.array([[0, 255]], dtype=np.uint8) pil_image = PIL.Image.fromarray(image) self.assertEqual(pil_image.mode, "L") self.assertEqual(pil_image.size, (2, 1)) rgb_image = convert_to_rgb(pil_image) self.assertEqual(rgb_image.mode, "RGB") self.assertEqual(rgb_image.size, (2, 1)) self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))) def test_flip_channel_order(self): # fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[16, 17, 18, 19], [20, 21, 22, 23]], ]) # fmt: on img_channels_last = np.moveaxis(img_channels_first, 0, -1) # fmt: off flipped_img_channels_first = np.array([ [[16, 17, 18, 19], [20, 21, 22, 23]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], ]) # fmt: on flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1) self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first)) self.assertTrue( np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last) ) self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last)) self.assertTrue( np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first) ) # Can flip when the image has 2 channels # 
fmt: off img_channels_first = np.array([ [[ 0, 1, 2, 3], [ 4, 5, 6, 7]], [[ 8, 9, 10, 11], [12, 13, 14, 15]], ]) # fmt: on flipped_img_channels_first = img_channels_first[::-1, :, :] self.assertTrue( np.allclose( flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first ) )
transformers/tests/test_image_transforms.py/0
{ "file_path": "transformers/tests/test_image_transforms.py", "repo_id": "transformers", "token_count": 12379 }
414
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def get_new_path(suffix="") -> str: directory = tempfile.mkdtemp() return os.path.join(directory, str(uuid.uuid4()) + suffix) @require_soundfile @require_torch class AgentAudioTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 agent_type = AgentAudio(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(path)) # Ensure that the file contains the same value as the original tensor new_tensor, _ = sf.read(path) self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4)) def test_from_string(self): tensor = torch.rand(12, dtype=torch.float64) - 0.5 path = get_new_path(suffix=".wav") sf.write(path, tensor, 16000) agent_type = AgentAudio(path) self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) self.assertEqual(agent_type.to_string(), path) @require_vision @require_torch class AgentImageTests(unittest.TestCase): def test_from_tensor(self): tensor = torch.randint(0, 256, (64, 64, 3)) agent_type = AgentImage(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4)) self.assertIsInstance(agent_type.to_raw(), Image.Image) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) def test_from_string(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(path) self.assertTrue(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) def test_from_image(self): path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png" image = Image.open(path) agent_type = AgentImage(image) self.assertFalse(path.samefile(agent_type.to_string())) self.assertTrue(image == agent_type.to_raw()) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(path)) class AgentTextTests(unittest.TestCase): def test_from_string(self): string = "Hey!" 
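# AgentText is expected to behave like the underlying string: to_string(), to_raw(), and equality should all round-trip the original value.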
agent_type = AgentText(string) self.assertEqual(string, agent_type.to_string()) self.assertEqual(string, agent_type.to_raw()) self.assertEqual(string, agent_type)
transformers/tests/tools/test_agent_types.py/0
{ "file_path": "transformers/tests/tools/test_agent_types.py", "repo_id": "transformers", "token_count": 1583 }
415
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class MyTestTrainerCallback(TrainerCallback): "A callback that registers the events that goes through." def __init__(self): self.events = [] def on_init_end(self, args, state, control, **kwargs): self.events.append("on_init_end") def on_train_begin(self, args, state, control, **kwargs): self.events.append("on_train_begin") def on_train_end(self, args, state, control, **kwargs): self.events.append("on_train_end") def on_epoch_begin(self, args, state, control, **kwargs): self.events.append("on_epoch_begin") def on_epoch_end(self, args, state, control, **kwargs): self.events.append("on_epoch_end") def on_step_begin(self, args, state, control, **kwargs): self.events.append("on_step_begin") def on_step_end(self, args, state, control, **kwargs): self.events.append("on_step_end") def on_evaluate(self, args, state, control, **kwargs): self.events.append("on_evaluate") def on_predict(self, args, state, control, **kwargs): self.events.append("on_predict") def on_save(self, args, state, control, **kwargs): self.events.append("on_save") def on_log(self, args, state, control, **kwargs): self.events.append("on_log") def on_prediction_step(self, args, state, control, **kwargs): self.events.append("on_prediction_step") @require_torch class TrainerCallbackTest(unittest.TestCase): def setUp(self): self.output_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.output_dir) def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
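# The trainer is built from the tiny regression fixtures imported from test_trainer above, so callback events can be exercised with very short training runs.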
train_dataset = RegressionDataset(length=train_len) eval_dataset = RegressionDataset(length=eval_len) config = RegressionModelConfig(a=a, b=b) model = RegressionPreTrainedModel(config) args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs) return Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, ) def check_callbacks_equality(self, cbs1, cbs2): self.assertEqual(len(cbs1), len(cbs2)) # Order doesn't matter cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) for cb1, cb2 in zip(cbs1, cbs2): if isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1, cb2) elif isinstance(cb1, type) and not isinstance(cb2, type): self.assertEqual(cb1, cb2.__class__) elif not isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1.__class__, cb2) else: self.assertEqual(cb1, cb2) def get_expected_events(self, trainer): expected_events = ["on_init_end", "on_train_begin"] step = 0 train_dl_len = len(trainer.get_eval_dataloader()) evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs): expected_events.append("on_epoch_begin") for _ in range(train_dl_len): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save") expected_events.append("on_epoch_end") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def test_init_callback(self): trainer = self.get_trainer() expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # Callbacks passed at init are added to the default callbacks trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(MyTestTrainerCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback trainer = self.get_trainer(disable_tqdm=True) expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_add_remove_callback(self): expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] trainer = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(DefaultFlowCallback) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb = trainer.pop_callback(DefaultFlowCallback) self.assertEqual(cb.__class__, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(DefaultFlowCallback) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # We can also add, pop, or remove by instance trainer = self.get_trainer() 
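# The test relies on callbacks[0] being the DefaultFlowCallback instance registered by default, mirroring the removal-by-class case above.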
cb = trainer.callback_handler.callbacks[0] trainer.remove_callback(cb) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb1 = trainer.callback_handler.callbacks[0] cb2 = trainer.pop_callback(cb1) self.assertEqual(cb1, cb2) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(cb1) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_event_flow(self): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore", category=UserWarning) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # Independent log/save/eval trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # A bit of everything trainer = self.get_trainer( callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", ) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning") as warn_mock: trainer = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], ) assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
transformers/tests/trainer/test_trainer_callback.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_callback.py", "repo_id": "transformers", "token_count": 4143 }
416
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class GenericTester(unittest.TestCase): def test_flatten_dict(self): input_dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } expected_dict = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(input_dict), expected_dict) def test_transpose_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(transpose(x), x.transpose())) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))) @require_torch def test_transpose_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_tf def test_transpose_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_flax def test_transpose_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t)))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0))))) def test_reshape_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3)))) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5)))) @require_torch def 
test_reshape_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_tf def test_reshape_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_flax def test_reshape_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3))))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5))))) def test_squeeze_numpy(self): x = np.random.randn(1, 3, 4) self.assertTrue(np.allclose(squeeze(x), np.squeeze(x))) x = np.random.randn(1, 4, 1, 5) self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2))) @require_torch def test_squeeze_torch(self): x = np.random.randn(1, 3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_tf def test_squeeze_tf(self): x = np.random.randn(1, 3, 4) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_flax def test_squeeze_flax(self): x = np.random.randn(1, 3, 4) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t)))) x = np.random.randn(1, 4, 1, 5) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2)))) def test_expand_dims_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1))) @require_torch def test_expand_dims_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_tf def test_expand_dims_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_flax def test_expand_dims_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
transformers/tests/utils/test_generic.py/0
{ "file_path": "transformers/tests/utils/test_generic.py", "repo_id": "transformers", "token_count": 3485 }
417
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py PATH_TO_TRANSFORMERS = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)` _re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)") CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = { "DecisionTransformerConfig", "EncoderDecoderConfig", "MusicgenConfig", "RagConfig", "SpeechEncoderDecoderConfig", "TimmBackboneConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", "LlamaConfig", } def get_checkpoint_from_config_class(config_class): checkpoint = None # source code of `config_class` config_source = inspect.getsource(config_class) checkpoints = _re_checkpoint.findall(config_source) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/"): ckpt_link = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}" if ckpt_link == ckpt_link_from_name: checkpoint = ckpt_name break return checkpoint def check_config_docstrings_have_checkpoints(): configs_without_checkpoint = [] for config_class in list(CONFIG_MAPPING.values()): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue checkpoint = get_checkpoint_from_config_class(config_class) name = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(name) if len(configs_without_checkpoint) > 0: message = "\n".join(sorted(configs_without_checkpoint)) raise ValueError( f"The following configurations don't contain any valid checkpoint:\n{message}\n\n" "The requirement is to include a link pointing to one of the models of this architecture in the " "docstring of the config classes listed above. The link should have be a markdown format like " "[myorg/mymodel](https://huggingface.co/myorg/mymodel)." ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
transformers/utils/check_config_docstrings.py/0
{ "file_path": "transformers/utils/check_config_docstrings.py", "repo_id": "transformers", "token_count": 1293 }
418
""" Script for downloading all GLUE data. Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e Note: for legal reasons, we are unable to host MRPC. You can either use the version hosted by the SentEval team, which is already tokenized, or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). You should then rename and place specific files in a folder (see below for an example). mkdir MRPC cabextract MSRParaphraseCorpus.msi -d MRPC cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt rm MRPC/_* rm MSRParaphraseCorpus.msi 1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. 2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! """ import argparse import os import sys import urllib.request import zipfile TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": 
"https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt" MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt" def download_and_extract(task, data_dir): print(f"Downloading and extracting {task}...") data_file = f"{task}.zip" urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print("\tCompleted!") def format_mrpc(data_dir, path_to_data): print("Processing MRPC...") mrpc_dir = os.path.join(data_dir, "MRPC") if not os.path.isdir(mrpc_dir): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") else: print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) if not os.path.isfile(mrpc_train_file): raise ValueError(f"Train data not found at {mrpc_train_file}") if not os.path.isfile(mrpc_test_file): raise ValueError(f"Test data not found at {mrpc_test_file}") urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) dev_ids = [] with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split("\t")) with open(mrpc_train_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8" ) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: label, id1, id2, s1, s2 = row.strip().split("\t") if [id1, id2] in dev_ids: dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) else: train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) with open(mrpc_test_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8" ) as test_fh: header = data_fh.readline() test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") for idx, row in enumerate(data_fh): label, id1, id2, s1, s2 = row.strip().split("\t") test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) print("\tCompleted!") def download_diagnostic(data_dir): print("Downloading and extracting diagnostic...") if not os.path.isdir(os.path.join(data_dir, "diagnostic")): os.mkdir(os.path.join(data_dir, "diagnostic")) data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) print("\tCompleted!") return def get_tasks(task_names): task_names = task_names.split(",") if "all" in task_names: tasks = TASKS else: tasks = [] for task_name in 
task_names: if task_name not in TASKS: raise ValueError(f"Task {task_name} not found!") tasks.append(task_name) return tasks def main(arguments): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data") parser.add_argument( "--tasks", help="tasks to download data for as a comma separated string", type=str, default="all" ) parser.add_argument( "--path_to_mrpc", help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt", type=str, default="", ) args = parser.parse_args(arguments) if not os.path.isdir(args.data_dir): os.mkdir(args.data_dir) tasks = get_tasks(args.tasks) for task in tasks: if task == "MRPC": format_mrpc(args.data_dir, args.path_to_mrpc) elif task == "diagnostic": download_diagnostic(args.data_dir) else: download_and_extract(task, args.data_dir) if __name__ == "__main__": sys.exit(main(sys.argv[1:]))
transformers/utils/download_glue_data.py/0
{ "file_path": "transformers/utils/download_glue_data.py", "repo_id": "transformers", "token_count": 3917 }
419
# pip install openrlbenchmark==0.2.1a5 # see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation BASELINE_PR_TAG=v0.4.7-55-g110e672 BASELINE_PR_NAME=PR-662 python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$BASELINE_PR_TAG/sentiment \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \ "sentiment_tuning_step_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb gradient accumulation ($BASELINE_PR_NAME)" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$BASELINE_PR_TAG/gradient_accu \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \ "sentiment_tuning_gpt2?tag=$BASELINE_PR_TAG&cl=sentiment gpt2 ($BASELINE_PR_NAME)" \ "sentiment_tuning_falcon_rw_1b?tag=$BASELINE_PR_TAG&cl=sentiment tiiuae/falcon-rw-1b ($BASELINE_PR_NAME)" \ "sentiment_tuning_gpt2xl_grad_accu?tag=$BASELINE_PR_TAG&cl=sentiment gpt2xl ($BASELINE_PR_NAME)" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$BASELINE_PR_TAG/different_models \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "sentiment_tuning?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb ($BASELINE_PR_NAME)" \ "sentiment_tuning_peft?tag=$BASELINE_PR_TAG&cl=sentiment lvwerra/gpt2-imdb w/ peft ($BASELINE_PR_NAME)" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$BASELINE_PR_TAG/peft \ --scan-history python benchmark/upload_benchmark.py \ --folder_path="benchmark/trl/$BASELINE_PR_TAG" \ --path_in_repo="images/benchmark/$BASELINE_PR_TAG" \ --repo_id="trl-internal-testing/example-images" \ --repo_type="dataset"
trl/benchmark/plot.sh/0
{ "file_path": "trl/benchmark/plot.sh", "repo_id": "trl", "token_count": 1454 }
420
# DPO Trainer TRL supports the DPO Trainer for training language models from preference data, as described in the paper [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) by Rafailov et al., 2023. For a full example, have a look at [`examples/scripts/dpo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/dpo.py). The first step, as always, is to train your SFT model to ensure the data we train on is in-distribution for the DPO algorithm. ## How DPO works Fine-tuning a language model via DPO consists of two steps and is easier than PPO: 1. **Data collection**: Gather a preference dataset of chosen and rejected pairs of generations, given a prompt. 2. **Optimization**: Maximize the log-likelihood of the DPO loss directly. DPO-compatible datasets can be found with [the tag `dpo` on Hugging Face Hub](https://huggingface.co/datasets?other=dpo). This process is illustrated in the sketch below (from [figure 1 of the original paper](https://arxiv.org/pdf/2305.18290.pdf)): <img width="835" alt="Screenshot 2024-03-19 at 12 39 41" src="https://github.com/huggingface/trl/assets/49240599/9150fac6-3d88-4ca2-8ec6-2a6f3473216d"> Read more about the DPO algorithm in the [original paper](https://arxiv.org/pdf/2305.18290.pdf). ## Expected dataset format The DPO trainer expects a very specific format for the dataset, since the model will be trained to directly optimize the preference for which of two given sentences is the most relevant. We provide an example from the [`Anthropic/hh-rlhf`](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset below: <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/rlhf-antropic-example.png" width="50%"> </div> Therefore, the final dataset object should contain these 3 entries if you use the default `DPODataCollatorWithPadding` data collator. The entries should be named: - `prompt` - `chosen` - `rejected` for example: ```py dpo_dataset_dict = { "prompt": [ "hello", "how are you", "What is your name?", "What is your name?", "Which is the best programming language?", "Which is the best programming language?", "Which is the best programming language?", ], "chosen": [ "hi nice to meet you", "I am fine", "My name is Mary", "My name is Mary", "Python", "Python", "Java", ], "rejected": [ "leave me alone", "I am not fine", "Whats it to you?", "I dont have a name", "Javascript", "C++", "C++", ], } ``` where the `prompt` contains the context inputs, `chosen` contains the corresponding chosen responses and `rejected` contains the corresponding negative (rejected) responses. As can be seen, a prompt can have multiple responses, and this is reflected in the entries being repeated in the dictionary's value arrays. ## Expected model format The DPO trainer expects a model of `AutoModelForCausalLM`, compared to PPO, which expects `AutoModelForCausalLMWithValueHead` for the value function. ## Using the `DPOTrainer` For a detailed example, have a look at the `examples/scripts/dpo.py` script. At a high level, we need to initialize the `DPOTrainer` with the `model` we wish to train, a reference `ref_model` which we will use to calculate the implicit rewards of the preferred and rejected responses, the `beta` hyperparameter of the implicit reward, and the dataset containing the 3 entries listed above. Note that the `model` and `ref_model` need to have the same architecture (i.e. decoder-only or encoder-decoder).
```py dpo_trainer = DPOTrainer( model, model_ref, args=training_args, beta=0.1, train_dataset=train_dataset, tokenizer=tokenizer, ) ``` After this, one can then call: ```py dpo_trainer.train() ``` Note that the `beta` is the temperature parameter for the DPO loss, typically something in the range of `0.1` to `0.5`; as `beta` -> 0 the reference model is effectively ignored. ## Loss functions Given the preference data, we can fit a binary classifier according to the Bradley-Terry model; in fact, the DPO authors propose the sigmoid loss on the normalized likelihood via `logsigmoid` to fit a logistic regression. The [RSO](https://arxiv.org/abs/2309.06657) authors propose to use a hinge loss on the normalized likelihood from the [SLiC](https://arxiv.org/abs/2305.10425) paper. The `DPOTrainer` can be switched to this loss via the `loss_type="hinge"` argument, and the `beta` in this case is the reciprocal of the margin. The [IPO](https://arxiv.org/abs/2310.12036) authors provide a deeper theoretical understanding of the DPO algorithm, identify an issue with overfitting, and propose an alternative loss which can be used via the `loss_type="ipo"` argument to the trainer. Note that the `beta` parameter is the reciprocal of the gap between the log-likelihood ratios of the chosen vs the rejected completion pair, and thus the smaller the `beta`, the larger this gap is. As per the paper, the loss is averaged over the log-likelihoods of the completion (unlike DPO, which only sums them). [cDPO](https://ericmitchell.ai/cdpo.pdf) is a tweak on the DPO loss where we assume that the preference labels are noisy with some probability that can be passed to the `DPOTrainer` via the `label_smoothing` argument (between 0 and 0.5), and then a conservative DPO loss is used. Use the `loss_type="cdpo"` argument to the trainer to use it. The [KTO](https://arxiv.org/abs/2402.01306) authors directly maximize the utility of LLM generations instead of the log-likelihood of preferences. To use preference data with KTO, we recommend breaking up the n preferences into 2n examples and using [`KTOTrainer`](kto_trainer) (i.e., treating the data like an unpaired feedback dataset). Although it is possible to pass `loss_type="kto_pair"` into DPOTrainer, this is a highly simplified version of KTO that we *do not recommend* in most cases. Please use [`KTOTrainer`](kto_trainer) when possible. ## Logging While training and evaluating, we record the following reward metrics: * `rewards/chosen`: the mean difference between the log probabilities of the policy model and the reference model for the chosen responses, scaled by beta * `rewards/rejected`: the mean difference between the log probabilities of the policy model and the reference model for the rejected responses, scaled by beta * `rewards/accuracies`: the mean of how often the chosen rewards are greater than the corresponding rejected rewards * `rewards/margins`: the mean difference between the chosen and corresponding rejected rewards ## Accelerate DPO fine-tuning using `unsloth` You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc.) and Mistral architectures.
Some benchmarks for DPO listed below: | GPU | Model | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved | |----------|-----------------|-----------|------|------------------------|-----------------|----------------| | A100 40G | Zephyr 7b | Ultra Chat| 1x | 1.24x | **1.88x** | -11.6% | | Tesla T4 | Zephyr 7b | Ultra Chat| 1x | 1.09x | **1.55x** | -18.6% | First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: ```python import torch from transformers import TrainingArguments from trl import DPOTrainer from unsloth import FastLanguageModel max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number. # Load model model, tokenizer = FastLanguageModel.from_pretrained( model_name = "unsloth/zephyr-sft", max_seq_length = max_seq_length, dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit = True, # Use 4bit quantization to reduce memory usage. Can be False. # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, r = 16, target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 16, lora_dropout = 0, # Dropout = 0 is currently optimized bias = "none", # Bias = "none" is currently optimized use_gradient_checkpointing = True, random_state = 3407, ) training_args = TrainingArguments(output_dir="./output") dpo_trainer = DPOTrainer( model, model_ref=None, args=training_args, beta=0.1, train_dataset=train_dataset, tokenizer=tokenizer, ) dpo_trainer.train() ``` The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). ## Reference model considerations with PEFT You have three main options (plus several variants) for how the reference model works when using PEFT, assuming the model that you would like to further enhance with DPO was tuned using (Q)LoRA. 1. Simply create two instances of the model, each loading your adapter - works fine but is very inefficient. 2. Merge the adapter into the base model, create another adapter on top, then leave the `model_ref` param null, in which case DPOTrainer will unload the adapter for reference inference - efficient, but has potential downsides discussed below. 3. Load the adapter twice with different names, then use `set_adapter` during training to swap between the adapter being DPO'd and the reference adapter - slightly less efficient compared to 2 (~adapter size VRAM overhead), but avoids the pitfalls. ### Downsides to merging QLoRA before DPO (approach 2) As suggested by [Benjamin Marie](https://medium.com/@bnjmn_marie/dont-merge-your-lora-adapter-into-a-4-bit-llm-65b6da287997), the best option for merging QLoRA adapters is to first dequantize the base model, then merge the adapter. Something similar to [this script](https://github.com/jondurbin/qlora/blob/main/qmerge.py). However, after using this approach, you will have an unquantized base model. Therefore, to use QLoRA for DPO, you will need to re-quantize the merged model or use the unquantized merge (resulting in higher memory demand). 
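To make approach 2 above more concrete, the flow typically looks like the sketch below. This is not an official recipe: it assumes you have already produced a dequantized, full-precision copy of the base model (for example with a script like the one linked above), and all paths are placeholders.

```python
# Rough sketch of approach 2 (merge, then re-quantize), assuming you already
# have a full-precision copy of the base model produced by a dequantization
# step as discussed above. All paths are placeholders.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

base = AutoModelForCausalLM.from_pretrained("path/to/dequantized-base", torch_dtype=torch.bfloat16)
merged = PeftModel.from_pretrained(base, "path/to/qlora-adapter").merge_and_unload()
merged.save_pretrained("path/to/merged-model")

# Re-load the merged model in 4-bit before creating a fresh adapter for DPO.
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
model = AutoModelForCausalLM.from_pretrained("path/to/merged-model", quantization_config=bnb_config)
```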
### Using option 3 - load the adapter twice To avoid the downsides with option 2, you can load your fine-tuned adapter into the model twice, with different names, and set the model/ref adapter names in DPOTrainer. For example: ```python # Load the base model. bnb_config = BitsAndBytesConfig( load_in_4bit=True, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) model = AutoModelForCausalLM.from_pretrained( "mistralai/mixtral-8x7b-v0.1", load_in_4bit=True, quantization_config=bnb_config, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16, device_map="auto", ) model.config.use_cache = False # Load the adapter. model = PeftModel.from_pretrained( model, "/path/to/peft", is_trainable=True, adapter_name="train", ) # Load the adapter a second time, with a different name, which will be our reference model. model.load_adapter("/path/to/peft", adapter_name="reference") # Initialize the trainer, without a ref_model param. dpo_trainer = DPOTrainer( model, ... model_adapter_name="train", ref_adapter_name="reference", ) ``` ## DPOTrainer [[autodoc]] DPOTrainer
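As a supplement to the loss functions described earlier, the sketch below shows how the default sigmoid DPO loss (and the hinge variant) can be computed from summed per-sequence log-probabilities. It is a standalone illustration, not the exact `DPOTrainer` implementation.

```python
# Standalone sketch of the sigmoid DPO loss (and hinge variant) discussed in
# the "Loss functions" section above. Inputs are summed per-sequence log-probs
# under the policy and reference models; not the exact DPOTrainer code.
import torch
import torch.nn.functional as F

def dpo_loss_sketch(policy_chosen_logps, policy_rejected_logps,
                    ref_chosen_logps, ref_rejected_logps, beta=0.1, loss_type="sigmoid"):
    pi_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = ref_chosen_logps - ref_rejected_logps
    logits = pi_logratios - ref_logratios
    if loss_type == "sigmoid":
        losses = -F.logsigmoid(beta * logits)
    elif loss_type == "hinge":
        losses = torch.relu(1 - beta * logits)
    else:
        raise ValueError(loss_type)
    # the logged reward metrics are the beta-scaled policy/reference log-prob gaps
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps).detach()
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps).detach()
    return losses.mean(), chosen_rewards, rejected_rewards
```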
trl/docs/source/dpo_trainer.mdx/0
{ "file_path": "trl/docs/source/dpo_trainer.mdx", "repo_id": "trl", "token_count": 3902 }
421
# Supervised Fine-tuning Trainer Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with few lines of code on your dataset. Check out a complete flexible example at [`examples/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft.py). ## Quickstart If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model. The following code-snippet takes care of all the data pre-processing and training for you: ```python from datasets import load_dataset from trl import SFTTrainer dataset = load_dataset("imdb", split="train") trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, dataset_text_field="text", max_seq_length=512, ) trainer.train() ``` Make sure to pass a correct value for `max_seq_length` as the default value will be set to `min(tokenizer.model_max_length, 1024)`. You can also construct a model outside of the trainer and pass it as follows: ```python from transformers import AutoModelForCausalLM from datasets import load_dataset from trl import SFTTrainer dataset = load_dataset("imdb", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") trainer = SFTTrainer( model, train_dataset=dataset, dataset_text_field="text", max_seq_length=512, ) trainer.train() ``` The above snippets will use the default training arguments from the [`transformers.TrainingArguments`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) class. If you want to modify that, make sure to create your own `TrainingArguments` object and pass it to the [`SFTTrainer`] constructor as it is done on the [`supervised_finetuning.py` script](https://github.com/huggingface/trl/blob/main/examples/stack_llama/scripts/supervised_finetuning.py) on the stack-llama example. ## Advanced usage ### Train on completions only You can use the `DataCollatorForCompletionOnlyLM` to train your model on the generated prompts only. Note that this works only in the case when `packing=False`. To instantiate that collator for instruction data, pass a response template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") def formatting_prompts_func(example): output_texts = [] for i in range(len(example['instruction'])): text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}" output_texts.append(text) return output_texts response_template = " ### Answer:" collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) trainer = SFTTrainer( model, train_dataset=dataset, formatting_func=formatting_prompts_func, data_collator=collator, ) trainer.train() ``` To instantiate that collator for assistant style conversation data, pass a response template, an instruction template and the tokenizer. 
Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") instruction_template = "### Human:" response_template = "### Assistant:" collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False) trainer = SFTTrainer( model, train_dataset=dataset, dataset_text_field="text", data_collator=collator, ) trainer.train() ``` Make sure to have a `pad_token_id` which is different from `eos_token_id`; using the same token for both can result in the model not properly predicting EOS (End of Sentence) tokens during generation. #### Using token_ids directly for `response_template` Some tokenizers, like Llama 2 (`meta-llama/Llama-2-XXb-hf`), tokenize sequences differently depending on whether they have context or not. For example: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") def print_tokens_with_ids(txt): tokens = tokenizer.tokenize(txt, add_special_tokens=False) token_ids = tokenizer.encode(txt, add_special_tokens=False) print(list(zip(tokens, token_ids))) prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?""" print_tokens_with_ids(prompt) # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...] response_template = "### Assistant:" print_tokens_with_ids(response_template) # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)] ``` In this case, and due to the lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently: - Text (with context): `[2277, 29937, 4007, 22137, 29901]` - `response_template` (without context): `[835, 4007, 22137, 29901]` This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text: ``` RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...]) ``` To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed, and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example: ```python response_template_with_context = "\n### Assistant:" # We added context here: "\n". This is enough for this tokenizer response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:] # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]` data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer) ```
The [`setup_chat_format`] function in `trl` easily sets up a model and tokenizer for conversational AI tasks. This function: - Adds special tokens to the tokenizer, e.g. `<|im_start|>` and `<|im_end|>`, to indicate the start and end of a conversation. - Resizes the model’s embedding layer to accommodate the new tokens. - Sets the `chat_template` of the tokenizer, which is used to format the input data into a chat-like format. The default is `chatml` from OpenAI. - _optionally_ you can pass `resize_to_multiple_of` to resize the embedding layer to a multiple of the `resize_to_multiple_of` argument, e.g. 64. If you want to see more formats being supported in the future, please open a GitHub issue on [trl](https://github.com/huggingface/trl) ```python from transformers import AutoModelForCausalLM, AutoTokenizer # Load model and tokenizer model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") # Set up the chat format with default 'chatml' format model, tokenizer = setup_chat_format(model, tokenizer) ``` With our model and tokenizer set up, we can now fine-tune our model on a conversational dataset. Below is an example of how a dataset can be formatted for fine-tuning. ### Dataset format support The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset to the trainer without any pre-processing directly. The following formats are supported: * conversational format ```json {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]} ``` * instruction format ```json {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} ``` If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method. ```python from datasets import load_dataset from trl import SFTTrainer ... # load jsonl dataset dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train") # load dataset from the HuggingFace Hub dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train") ... trainer = SFTTrainer( "facebook/opt-350m", args=training_args, train_dataset=dataset, packing=True, ) ``` If the dataset is not in one those format you can either preprocess the dataset to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look. ### Format your input prompts For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt & the other for the response. This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows: ```bash Below is an instruction ... 
### Instruction {prompt} ### Response: {completion} ``` Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run: ```python ... def formatting_prompts_func(example): output_texts = [] for i in range(len(example['question'])): text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}" output_texts.append(text) return output_texts trainer = SFTTrainer( model, train_dataset=dataset, formatting_func=formatting_prompts_func, ) trainer.train() ``` To properly format your input make sure to process all the examples by looping over them and returning a list of processed text. Check out a full example on how to use SFTTrainer on alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763) ### Packing dataset ([`ConstantLengthDataset`]) [`SFTTrainer`] supports _example packing_, where multiple short examples are packed in the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTTrainer`] constructor. ```python ... trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, dataset_text_field="text", packing=True ) trainer.train() ``` Note that if you use a packed dataset and if you pass `max_steps` in the training arguments you will probably train your models for more than few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing. If you don't want to pack your `eval_dataset`, you can pass `eval_packing=False` to the `SFTTrainer` init method. #### Customize your prompts using packed dataset If your dataset has several fields that you want to combine, for example if the dataset has `question` and `answer` fields and you want to combine them, you can pass a formatting function to the trainer that will take care of that. For example: ```python def formatting_func(example): text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" return text trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, packing=True, formatting_func=formatting_func ) trainer.train() ``` You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTTrainer`] constructor. Please refer to that class' signature for more information. ### Control over the pretrained model You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTTrainer`]. For example, if you want to load a model in a different precision, analogous to ```python model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16) ``` ```python ... trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, dataset_text_field="text", model_init_kwargs={ "torch_dtype": torch.bfloat16, }, ) trainer.train() ``` Note that all keyword arguments of `from_pretrained()` are supported. 
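Returning to the packing behaviour described earlier, the sketch below illustrates the idea behind [`ConstantLengthDataset`]: tokenize the examples, concatenate them with an EOS separator, and cut the stream into constant-length blocks. This is a simplified illustration, not the actual implementation.

```python
# Minimal illustration of the packing idea behind ConstantLengthDataset:
# tokenize, concatenate with an EOS separator, then cut constant-length blocks.
# Simplified sketch only, not the actual trl class.
def pack_examples(texts, tokenizer, seq_length=512):
    token_buffer = []
    for text in texts:
        token_buffer.extend(tokenizer(text)["input_ids"] + [tokenizer.eos_token_id])
    # drop the incomplete tail so every block has exactly seq_length tokens
    for start in range(0, len(token_buffer) - seq_length + 1, seq_length):
        yield {"input_ids": token_buffer[start:start + seq_length]}
```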
### Training adapters We also support a tight integration with 🤗 PEFT library so that any user can conveniently train adapters and share them on the Hub instead of training the entire model ```python from datasets import load_dataset from trl import SFTTrainer from peft import LoraConfig dataset = load_dataset("imdb", split="train") peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( "EleutherAI/gpt-neo-125m", train_dataset=dataset, dataset_text_field="text", peft_config=peft_config ) trainer.train() ``` You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer without the `peft_config` argument being passed. ### Training adapters with base 8 bit models For that you need to first load your 8bit model outside the Trainer and pass a `PeftConfig` to the trainer. For example: ```python ... peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map="auto", ) trainer = SFTTrainer( model, train_dataset=dataset, dataset_text_field="text", peft_config=peft_config, ) trainer.train() ``` ## Using Flash Attention and Flash Attention 2 You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal changes of code. First, to make sure you have all the latest features from transformers, install transformers from source ```bash pip install -U git+https://github.com/huggingface/transformers.git ``` Note that Flash Attention only works on GPU now and under half-precision regime (when using adapters, base model loaded in half-precision) Note also both features are perfectly compatible with other tools such as quantization. ### Using Flash-Attention 1 For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use Flash Attention kernel. First, install the latest optimum package: ```bash pip install -U optimum ``` Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager: ```diff ... + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): trainer.train() ``` Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset as `torch.scaled_dot_product_attention` does not support training with padding tokens if you use Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to Flash Attention 2 integration. Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA-T4 16GB. 
| use_flash_attn_1 | model_name | max_seq_len | batch_size | time per training step | | ---------------- | ----------------- | ----------- | ---------- | ---------------------- | | x | facebook/opt-350m | 2048 | 8 | ~59.1s | | | facebook/opt-350m | 2048 | 8 | **OOM** | | x | facebook/opt-350m | 2048 | 4 | ~30.3s | | | facebook/opt-350m | 2048 | 4 | ~148.9s | ### Using Flash Attention-2 To use Flash Attention 2, first install the latest `flash-attn` package: ```bash pip install -U flash-attn ``` And add `attn_implementation="flash_attention_2"` when calling `from_pretrained`: ```python model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2" ) ``` If you don't use quantization, make sure your model is loaded in half-precision and dispatched on a supported GPU device. After loading your model, you can either train it as it is, or attach adapters and train the adapters on it in case your model is quantized. In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens. ### Using model creation utility We included a utility function to create your model. [[autodoc]] ModelConfig ```python from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config model_config = ModelConfig( model_name_or_path="facebook/opt-350m", attn_implementation=None, # or "flash_attention_2" ) torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) trainer = SFTTrainer( ..., model=model_config.model_name_or_path, peft_config=get_peft_config(model_config), ) ``` ### Enhance the model's performance using NEFTune NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://arxiv.org/abs/2310.05914) from Jain et al. It consists of adding noise to the embedding vectors during training. According to the abstract of the paper: > Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune. <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/neft-screenshot.png"> </div> To use it in `SFTTrainer`, simply pass `neftune_noise_alpha` when creating your `SFTTrainer` instance. Note that to avoid any surprising behaviour, NEFTune is disabled after training to restore the original behaviour of the embedding layer.
```python from datasets import load_dataset from trl import SFTTrainer dataset = load_dataset("imdb", split="train") trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, dataset_text_field="text", max_seq_length=512, neftune_noise_alpha=5, ) trainer.train() ``` We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench. <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-neftune-mistral-7b.png"> </div> Note however, that the amount of performance gain is _dataset dependent_ and in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains. ### Accelerate fine-tuning 2x using `unsloth` You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama (Yi, TinyLlama, Qwen, Deepseek etc) and Mistral architectures. Some benchmarks on 1x A100 listed below: | 1 A100 40GB | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved | |-----------------|-----------|-----|-------------------------|-----------------|----------------| | Code Llama 34b | Slim Orca | 1x | 1.01x | **1.94x** | -22.7% | | Llama-2 7b | Slim Orca | 1x | 0.96x | **1.87x** | -39.3% | | Mistral 7b | Slim Orca | 1x | 1.17x | **1.88x** | -65.9% | | Tiny Llama 1.1b | Alpaca | 1x | 1.55x | **2.74x** | -57.8% | First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: ```python import torch from transformers import TrainingArguments from trl import SFTTrainer from unsloth import FastLanguageModel max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number # Load model model, tokenizer = FastLanguageModel.from_pretrained( model_name = "unsloth/mistral-7b", max_seq_length = max_seq_length, dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit = True, # Use 4bit quantization to reduce memory usage. Can be False # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, r = 16, target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 16, lora_dropout = 0, # Dropout = 0 is currently optimized bias = "none", # Bias = "none" is currently optimized use_gradient_checkpointing = True, random_state = 3407, ) args = TrainingArguments(output_dir = "./output") trainer = SFTTrainer( model = model, args = args, train_dataset = dataset, dataset_text_field = "text", max_seq_length = max_seq_length, ) trainer.train() ``` The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). 
## Best practices Pay attention to the following best practices when training a model with that trainer: - By default, [`SFTTrainer`] pads the sequences to the `max_seq_length` argument of the [`SFTTrainer`]. If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 2048 and that value. Make sure to check it before training. - For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use the `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it. - For more memory-efficient training using adapters, you can load the base model in 8bit; for that, simply add the `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it. - If you create a model outside the trainer, make sure not to pass to the trainer any additional keyword arguments that are related to the `from_pretrained()` method. ## Multi-GPU Training Trainer (and thus SFTTrainer) supports multi-GPU training. If you run your script with `python script.py` it will default to using DP as the strategy, which may be [slower than expected](https://github.com/huggingface/trl/issues/1303). To use DDP (which is generally recommended, see [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many?select-gpu=Accelerate#data-parallelism) for more info) you must launch the script with `python -m torch.distributed.launch script.py` or `accelerate launch script.py`. For DDP to work you must also check the following: - If you're using gradient_checkpointing, add the following to the TrainingArguments: `gradient_checkpointing_kwargs={'use_reentrant':False}` (more info [here](https://github.com/huggingface/transformers/issues/26969)) - Ensure that the model is placed on the correct device: ```python from accelerate import PartialState device_string = PartialState().process_index model = AutoModelForCausalLM.from_pretrained( ... device_map={'':device_string} ) ``` ## GPTQ Conversion You may experience some issues with GPTQ quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format. ## SFTTrainer [[autodoc]] SFTTrainer ## ConstantLengthDataset [[autodoc]] trainer.ConstantLengthDataset
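As a supplement to the Multi-GPU section above, a minimal sketch of the DDP-friendly gradient checkpointing setup might look as follows; it assumes a recent `transformers` version that exposes `gradient_checkpointing_kwargs` on `TrainingArguments`.

```python
# Sketch of the non-reentrant gradient checkpointing setup mentioned in the
# Multi-GPU section above (assumes a recent transformers release).
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./output",
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
)
```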
trl/docs/source/sft_trainer.mdx/0
{ "file_path": "trl/docs/source/sft_trainer.mdx", "repo_id": "trl", "token_count": 8707 }
422
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from peft import LoraConfig from transformers import AutoTokenizer, HfArgumentParser, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment os.environ["HF_ALLOW_CODE_EVAL"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "false" @dataclass class ScriptArguments: model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} ) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) iterations: Optional[int] = field(default=1000, metadata={"help": "the number of iterations"}) seed: Optional[int] = field(default=0, metadata={"help": "the random seed"}) parser = HfArgumentParser(ScriptArguments) args = parser.parse_args_into_dataclasses()[0] lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", target_modules=["c_proj", "c_attn", "q_attn"], ) # set up models model = AutoModelForCausalLMWithValueHead.from_pretrained( args.model_name, use_auth_token=True, trust_remote_code=True, load_in_4bit=True, peft_config=lora_config, ) tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token # system prompt prompt = """\ Answer the following question: Q: In which branch of the arts is Patricia Neary famous? A: Ballets A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response> Result=Ballets<submit> Q: Who won Super Bowl XX? A: Chicago Bears A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. 
The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response> Result=Chicago Bears<submit> Q: """ generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": args.max_new_tokens, } # trainer config = PPOConfig( batch_size=args.batch_size, model_name=args.model_name, learning_rate=args.learning_rate, log_with=args.log_with, mini_batch_size=args.mini_batch_size, ppo_epochs=args.ppo_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, seed=args.seed, optimize_cuda_cache=True, ) ppo_trainer = PPOTrainer(config=config, model=model, tokenizer=tokenizer) dataset = load_dataset("trivia_qa", "rc", split="train") local_seed = args.seed + ppo_trainer.accelerator.process_index * 100003 # Prime dataset = dataset.shuffle(local_seed) def data_generator(): for i in range(len(dataset)): yield dataset[i]["question"], list(dataset[i]["answer"]["normalized_aliases"]) gen = data_generator() gen = iter(gen) def generate_data(n): tasks, answers = [], [] for _i in range(n): q, a = next(gen) tasks.append(q) answers.append(a) return tasks, answers def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] for response, answer in zip(responses, answers): reward = 0.0 for a in answer: if a.lower() in response.lower(): reward += 1.0 break rewards.append(torch.tensor(reward)) return rewards def tool_fn(x): # limit the amount of tokens return tool(x).split("\n")[1][:600] # text env tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") text_env = TextEnvironment( model, tokenizer, {"Wiki": tool_fn}, exact_match_reward, prompt, generation_kwargs=generation_kwargs, max_tool_reponse=400, ) def print_trainable_parameters(model): trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) print_trainable_parameters(model) # main training loop for i in range(args.iterations): tasks, answers = generate_data(config.batch_size) queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) train_stats = ppo_trainer.step(queries, responses, rewards, masks) response_texts = [tokenizer.decode(response) for response in responses] query_texts = [tokenizer.decode(query) for query in queries] texts = { "query": [qt.split("<submit>")[-1].strip() for qt in query_texts], "response": response_texts, "answer": [", ".join(item) for item in answers], } all_rewards = ppo_trainer.accelerator.gather(torch.tensor(rewards, device=ppo_trainer.accelerator.device)) ppo_trainer.log_stats(train_stats, texts, list(all_rewards), columns_to_log=["query", "response", "answer"]) if i % 100 == 0: ppo_trainer.save_pretrained(f"models/{args.model_name}_{args.seed}_{i}_triviaqa")
trl/examples/research_projects/tools/triviaqa.py/0
{ "file_path": "trl/examples/research_projects/tools/triviaqa.py", "repo_id": "trl", "token_count": 2555 }
423
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os from datetime import date from pathlib import Path from tabulate import tabulate MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters parser = argparse.ArgumentParser() parser.add_argument("--slack_channel_name", default="trl-push-ci") def main(slack_channel_name=None): failed = [] passed = [] group_info = [] total_num_failed = 0 empty_file = False or len(list(Path().glob("*.log"))) == 0 total_empty_files = [] for log in Path().glob("*.log"): section_num_failed = 0 i = 0 with open(log) as f: for line in f: line = json.loads(line) i += 1 if line.get("nodeid", "") != "": test = line["nodeid"] if line.get("duration", None) is not None: duration = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 else: passed.append([test, duration, log.name.split("_")[0]]) empty_file = i == 0 group_info.append([str(log), section_num_failed, failed]) total_empty_files.append(empty_file) os.remove(log) failed = [] no_error_payload = { "type": "section", "text": { "type": "plain_text", "text": "🌞 There were no failures!" if not any(total_empty_files) else "Something went wrong there is at least one empty file - please check GH action results.", "emoji": True, }, } message = "" payload = [ { "type": "header", "text": { "type": "plain_text", "text": "🤗 Results of the {} TRL tests.".format(os.environ.get("TEST_TYPE", "")), }, }, ] if total_num_failed > 0: for i, (name, num_failed, failed_tests) in enumerate(group_info): if num_failed > 0: if num_failed == 1: message += f"*{name}: {num_failed} failed test*\n" else: message += f"*{name}: {num_failed} failed tests*\n" failed_table = [] for test in failed_tests: failed_report = test[0].split("::") # Truncate the last string as some test names might be long failed_report[-1] = failed_report[-1][:30] + ".." failed_table.append(failed_report) failed_table = tabulate( failed_table, headers=["Test Location", "Test Case", "Test Name"], showindex="always", tablefmt="grid", maxcolwidths=[12, 12, 12], ) message += "\n```\n" + failed_table + "\n```" if total_empty_files[i]: message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n" print(f"### {message}") else: payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient if len(message) > MAX_LEN_MESSAGE: message = f"There are {total_num_failed} failed tests in total ! 
Cannot display the entire summary - please check the action results directly" if len(message) != 0: md_report = { "type": "section", "text": {"type": "mrkdwn", "text": message}, } payload.append(md_report) action_button = { "type": "section", "text": {"type": "mrkdwn", "text": "*For more details:*"}, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"On Push main {os.environ.get('TEST_TYPE')} test results for {date.today()}", }, ], } payload.append(date_report) print(payload) client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) if __name__ == "__main__": args = parser.parse_args() main(args.slack_channel_name)
trl/scripts/log_reports.py/0
{ "file_path": "trl/scripts/log_reports.py", "repo_id": "trl", "token_count": 2727 }
424
import subprocess


def test_hello_world():
    subprocess.run(
        "python examples/hello_world.py",
        shell=True,
        check=True,
    )
trl/tests/test_e2e.py/0
{ "file_path": "trl/tests/test_e2e.py", "repo_id": "trl", "token_count": 69 }
425
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import warnings from contextlib import contextmanager from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence from transformers.generation import TopKLogitsWarper, TopPLogitsWarper from .import_utils import is_npu_available, is_xpu_available try: from collections.abc import Mapping except ImportError: from collections.abc import Mapping WANDB_PADDING = -1 def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor: """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering. Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. 
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) return logits def flatten_dict(nested: Dict, sep: str = "/") -> Dict: """Flatten dictionary and concatenate nested keys with separator.""" def recurse(nest: Dict, prefix: str, into: Dict) -> None: for k, v in nest.items(): if sep in k: raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'") if isinstance(v, Mapping): recurse(v, prefix + k + sep, into) else: into[prefix + k] = v flat = {} recurse(nested, "", flat) return flat def convert_to_scalar(stats: Dict) -> Dict: """ Converts the stats from a flattened dict to single scalar dicts """ tensorboard_stats = {} for k, v in stats.items(): # for tensorboard compatibility - arrays and tensors are ignored with tensorboard # therefore we convert single element tensors to scalars if (isinstance(v, torch.Tensor) or isinstance(v, np.ndarray)) and ( len(v.shape) == 0 or (len(v.shape) == 1 and v.shape[0] == 1) ): v = v.item() tensorboard_stats[k] = v return tensorboard_stats def stack_dicts(stats_dicts: List[Dict]) -> Dict: """Stack the values of a dict.""" results = dict() for k in stats_dicts[0]: stats_list = [torch.flatten(d[k]) for d in stats_dicts] results[k] = pad_sequence(stats_list, batch_first=True, padding_value=WANDB_PADDING) return results def add_suffix(input_dict: Dict, suffix: str) -> Dict: """Add suffix to dict keys.""" return {k + suffix: v for k, v in input_dict.items()} def pad_to_size(tensor: torch.Tensor, size: int, dim: int = 1, padding: int = 50256) -> torch.Tensor: """Pad tensor to size.""" t_size = tensor.size()[dim] if t_size == size: return tensor else: return torch.nn.functional.pad(tensor, (0, size - t_size), "constant", padding) def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool = True) -> torch.Tensor: """ See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591 """ logp = F.log_softmax(logits, dim=2) if not gather: return logp logpy = torch.gather(logp, 2, labels.unsqueeze(2)).squeeze(-1) return logpy def whiten(values: torch.Tensor, shift_mean: bool = True) -> torch.Tensor: """Whiten values.""" mean, var = torch.mean(values), torch.var(values) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[bool] = None) -> torch.Tensor: """Compute mean of tensor with a masked values.""" if axis is not None: return (values * mask).sum(axis=axis) / mask.sum(axis=axis) else: return (values * mask).sum() / mask.sum() def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool = True) -> torch.Tensor: """Compute variance of tensor with masked values.""" mean = masked_mean(values, mask) centered_values = values - mean variance = masked_mean(centered_values**2, mask) if unbiased: mask_sum = mask.sum() if mask_sum == 0: raise ValueError( "The sum of the mask is zero, which can happen when `mini_batch_size=1`;" "try increase the `mini_batch_size` or `gradient_accumulation_steps`" ) # note that if mask_sum == 1, then there is a division by zero issue # to avoid it you just need to use a larger minibatch_size bessel_correction = mask_sum / (mask_sum - 1) variance = variance * bessel_correction 
return variance def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True) -> torch.Tensor: """Whiten values with masked values.""" mean, var = masked_mean(values, mask), masked_var(values, mask) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor: """ Tensor extension to torch.clamp https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713 """ clipped = torch.max(torch.min(x, tensor_max), tensor_min) return clipped def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor: """Calculate entropy from logits.""" pd = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1) return entropy def average_torch_dicts(list_of_dicts: List[Dict]) -> Dict: """Average values of a list of dicts with torch tensors.""" average_dict = dict() for key in list_of_dicts[0].keys(): average_dict[key] = torch.mean(torch.stack([d[key] for d in list_of_dicts]), axis=0) return average_dict def stats_to_np(stats_dict: Dict) -> Dict: """Cast all torch.tensors in dict to numpy arrays.""" new_dict = dict() for k, v in stats_dict.items(): if isinstance(v, torch.Tensor): new_dict[k] = v.detach().cpu() if new_dict[k].dtype == torch.bfloat16: new_dict[k] = new_dict[k].float() new_dict[k] = new_dict[k].numpy() else: new_dict[k] = v if np.isscalar(new_dict[k]): new_dict[k] = float(new_dict[k]) return new_dict def respond_to_batch( model: nn.Module, queries: List[torch.LongTensor], txt_len: int = 20, top_k: int = 0, top_p: float = 1.0 ) -> torch.LongTensor: """Sample text from language model.""" input_ids = queries for _i in range(txt_len): # Get Logits outputs = model(input_ids) next_token_logits = outputs[0][:, -1, :] next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) # Sample probs = F.softmax(next_token_logits, dim=-1) next_token = torch.multinomial(probs, num_samples=1).squeeze(1) input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1) return input_ids[:, -txt_len:] def set_seed(seed: int) -> None: """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`. Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_xpu_available(): torch.xpu.manual_seed_all(seed) elif is_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) class LengthSampler: """ Samples a length """ def __init__(self, min_value: int, max_value: int): self.values = list(range(min_value, max_value)) def __call__(self) -> int: return np.random.choice(self.values) class PPODecorators: optimize_device_cache = False @classmethod @contextmanager def empty_device_cache(cls): yield if cls.optimize_device_cache: if is_xpu_available(): gc.collect() torch.xpu.empty_cache() gc.collect() elif is_npu_available(): gc.collect() torch.npu.empty_cache() gc.collect() elif torch.cuda.is_available(): gc.collect() torch.cuda.empty_cache() gc.collect() def randn_tensor( shape: Union[Tuple, List], generator: Optional[Union[List[torch.Generator], torch.Generator]] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, layout: Optional[torch.layout] = None, ) -> torch.Tensor: """A helper function to create random tensors on the desired `device` with the desired `dtype`. 
When passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor is always created on the CPU. """ # device on which tensor is created defaults to device rand_device = device batch_size = shape[0] layout = layout or torch.strided device = device or torch.device("cpu") if generator is not None: gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type if gen_device_type != device.type and gen_device_type == "cpu": rand_device = "cpu" if device != "mps": warnings.warn( f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" f" slighly speed up this function by passing a generator that was created on the {device} device." ) elif gen_device_type != device.type and gen_device_type == "cuda": raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") # make sure generator list of length 1 is treated like a non-list if isinstance(generator, list) and len(generator) == 1: generator = generator[0] if isinstance(generator, list): shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) return latents
trl/trl/core.py/0
{ "file_path": "trl/trl/core.py", "repo_id": "trl", "token_count": 5038 }
426
# Copyright 2023 DDPO-pytorch authors (Kevin Black), metric-space, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from collections import defaultdict from concurrent import futures from typing import Any, Callable, Optional, Tuple from warnings import warn import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import whoami from ..models import DDPOStableDiffusionPipeline from . import BaseTrainer, DDPOConfig from .utils import PerPromptStatTracker logger = get_logger(__name__) MODEL_CARD_TEMPLATE = """--- license: apache-2.0 tags: - trl - ddpo - diffusers - reinforcement-learning - text-to-image - stable-diffusion --- # {model_name} This is a diffusion model that has been fine-tuned with reinforcement learning to guide the model outputs according to a value, function, or human feedback. The model can be used for image generation conditioned with text. """ class DDPOTrainer(BaseTrainer): """ The DDPOTrainer uses Deep Diffusion Policy Optimization to optimise diffusion models. Note, this trainer is heavily inspired by the work here: https://github.com/kvablack/ddpo-pytorch As of now only Stable Diffusion based pipelines are supported Attributes: **config** (`DDPOConfig`) -- Configuration object for DDPOTrainer. Check the documentation of `PPOConfig` for more details. **reward_function** (Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor]) -- Reward function to be used **prompt_function** (Callable[[], Tuple[str, Any]]) -- Function to generate prompts to guide model **sd_pipeline** (`DDPOStableDiffusionPipeline`) -- Stable Diffusion pipeline to be used for training. 
**image_samples_hook** (Optional[Callable[[Any, Any, Any], Any]]) -- Hook to be called to log images """ _tag_names = ["trl", "ddpo"] def __init__( self, config: DDPOConfig, reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor], prompt_function: Callable[[], Tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None, ): if image_samples_hook is None: warn("No image_samples_hook provided; no images will be logged") self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if "checkpoint_" not in os.path.basename(self.config.resume_from): # get the most recent checkpoint in this directory checkpoints = list( filter( lambda x: "checkpoint_" in x, os.listdir(self.config.resume_from), ) ) if len(checkpoints) == 0: raise ValueError(f"No checkpoints found in {self.config.resume_from}") checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints]) self.config.resume_from = os.path.join( self.config.resume_from, f"checkpoint_{checkpoint_numbers[-1]}", ) accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 # number of timesteps within each trajectory to train on self.num_train_timesteps = int(self.config.sample_num_steps * self.config.train_timestep_fraction) self.accelerator = Accelerator( log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, # we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the # number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get # the total number of optimizer steps to accumulate across. gradient_accumulation_steps=self.config.train_gradient_accumulation_steps * self.num_train_timesteps, **self.config.accelerator_kwargs, ) is_okay, message = self._config_check() if not is_okay: raise ValueError(message) is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" if self.accelerator.is_main_process: self.accelerator.init_trackers( self.config.tracker_project_name, config=dict(ddpo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs, ) logger.info(f"\n{config}") set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config( position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc="Timestep", dynamic_ncols=True, ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
if self.accelerator.mixed_precision == "fp16": inference_dtype = torch.float16 elif self.accelerator.mixed_precision == "bf16": inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer( trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers ) self.neg_prompt_embed = self.sd_pipeline.text_encoder( self.sd_pipeline.tokenizer( [""] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) )[0] if config.per_prompt_stat_tracking: self.stat_tracker = PerPromptStatTracker( config.per_prompt_stat_tracking_buffer_size, config.per_prompt_stat_tracking_min_count, ) # NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses # more memory self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora: unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) if self.config.async_reward_computation: self.executor = futures.ThreadPoolExecutor(max_workers=config.max_workers) if config.resume_from: logger.info(f"Resuming from {config.resume_from}") self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split("_")[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs, is_async=False): if not is_async: rewards = [] for images, prompts, prompt_metadata in prompt_image_pairs: reward, reward_metadata = self.reward_fn(images, prompts, prompt_metadata) rewards.append( ( torch.as_tensor(reward, device=self.accelerator.device), reward_metadata, ) ) else: rewards = self.executor.map(lambda x: self.reward_fn(*x), prompt_image_pairs) rewards = [ (torch.as_tensor(reward.result(), device=self.accelerator.device), reward_metadata.result()) for reward, reward_metadata in rewards ] return zip(*rewards) def step(self, epoch: int, global_step: int): """ Perform a single step of training. Args: epoch (int): The current epoch. global_step (int): The current global step. Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. - If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker. Returns: global_step (int): The updated global step. 
""" samples, prompt_image_data = self._generate_samples( iterations=self.config.sample_num_batches_per_epoch, batch_size=self.config.sample_batch_size, ) # collate samples into dict where each entry has shape (num_batches_per_epoch * sample.batch_size, ...) samples = {k: torch.cat([s[k] for s in samples]) for k in samples[0].keys()} rewards, rewards_metadata = self.compute_rewards( prompt_image_data, is_async=self.config.async_reward_computation ) for i, image_data in enumerate(prompt_image_data): image_data.extend([rewards[i], rewards_metadata[i]]) if self.image_samples_callback is not None: self.image_samples_callback(prompt_image_data, global_step, self.accelerator.trackers[0]) rewards = torch.cat(rewards) rewards = self.accelerator.gather(rewards).cpu().numpy() self.accelerator.log( { "reward": rewards, "epoch": epoch, "reward_mean": rewards.mean(), "reward_std": rewards.std(), }, step=global_step, ) if self.config.per_prompt_stat_tracking: # gather the prompts across processes prompt_ids = self.accelerator.gather(samples["prompt_ids"]).cpu().numpy() prompts = self.sd_pipeline.tokenizer.batch_decode(prompt_ids, skip_special_tokens=True) advantages = self.stat_tracker.update(prompts, rewards) else: advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-8) # ungather advantages; keep the entries corresponding to the samples on this process samples["advantages"] = ( torch.as_tensor(advantages) .reshape(self.accelerator.num_processes, -1)[self.accelerator.process_index] .to(self.accelerator.device) ) del samples["prompt_ids"] total_batch_size, num_timesteps = samples["timesteps"].shape for inner_epoch in range(self.config.train_num_inner_epochs): # shuffle samples along batch dimension perm = torch.randperm(total_batch_size, device=self.accelerator.device) samples = {k: v[perm] for k, v in samples.items()} # shuffle along time dimension independently for each sample # still trying to understand the code below perms = torch.stack( [torch.randperm(num_timesteps, device=self.accelerator.device) for _ in range(total_batch_size)] ) for key in ["timesteps", "latents", "next_latents", "log_probs"]: samples[key] = samples[key][ torch.arange(total_batch_size, device=self.accelerator.device)[:, None], perms, ] original_keys = samples.keys() original_values = samples.values() # rebatch them as user defined train_batch_size is different from sample_batch_size reshaped_values = [v.reshape(-1, self.config.train_batch_size, *v.shape[1:]) for v in original_values] # Transpose the list of original values transposed_values = zip(*reshaped_values) # Create new dictionaries for each row of transposed values samples_batched = [dict(zip(original_keys, row_values)) for row_values in transposed_values] self.sd_pipeline.unet.train() global_step = self._train_batched_samples(inner_epoch, epoch, global_step, samples_batched) # ensure optimization step at the end of the inner epoch if not self.accelerator.sync_gradients: raise ValueError( "Optimization step should have been performed by this point. Please check calculated gradient accumulation settings." 
) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, latents, timesteps, next_latents, log_probs, advantages, embeds): """ Calculate the loss for a batch of an unpacked sample Args: latents (torch.Tensor): The latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] timesteps (torch.Tensor): The timesteps sampled from the diffusion model, shape: [batch_size] next_latents (torch.Tensor): The next latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] log_probs (torch.Tensor): The log probabilities of the latents, shape: [batch_size] advantages (torch.Tensor): The advantages of the latents, shape: [batch_size] embeds (torch.Tensor): The embeddings of the prompts, shape: [2*batch_size or batch_size, ...] Note: the "or" is because if train_cfg is True, the expectation is that negative prompts are concatenated to the embeds Returns: loss (torch.Tensor), approx_kl (torch.Tensor), clipfrac (torch.Tensor) (all of these are of shape (1,)) """ with self.autocast(): if self.config.train_cfg: noise_pred = self.sd_pipeline.unet( torch.cat([latents] * 2), torch.cat([timesteps] * 2), embeds, ).sample noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.config.sample_guidance_scale * ( noise_pred_text - noise_pred_uncond ) else: noise_pred = self.sd_pipeline.unet( latents, timesteps, embeds, ).sample # compute the log prob of next_latents given latents under the current model scheduler_step_output = self.sd_pipeline.scheduler_step( noise_pred, timesteps, latents, eta=self.config.sample_eta, prev_sample=next_latents, ) log_prob = scheduler_step_output.log_probs advantages = torch.clamp( advantages, -self.config.train_adv_clip_max, self.config.train_adv_clip_max, ) ratio = torch.exp(log_prob - log_probs) loss = self.loss(advantages, self.config.train_clip_range, ratio) approx_kl = 0.5 * torch.mean((log_prob - log_probs) ** 2) clipfrac = torch.mean((torch.abs(ratio - 1.0) > self.config.train_clip_range).float()) return loss, approx_kl, clipfrac def loss( self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor, ): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp( ratio, 1.0 - clip_range, 1.0 + clip_range, ) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls( trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon, ) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() # ensures that accelerate doesn't try to handle saving of the model def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() # ensures that accelerate doesn't try to handle loading of the model def _generate_samples(self, iterations, batch_size): """ Generate samples from the model Args: iterations (int): Number of iterations to generate samples for batch_size (int): Batch size to use for sampling Returns: samples (List[Dict[str, torch.Tensor]]), 
prompt_image_pairs (List[List[Any]]) """ samples = [] prompt_image_pairs = [] self.sd_pipeline.unet.eval() sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) for _ in range(iterations): prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)]) prompt_ids = self.sd_pipeline.tokenizer( prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] with self.autocast(): sd_output = self.sd_pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type="pt", ) images = sd_output.images latents = sd_output.latents log_probs = sd_output.log_probs latents = torch.stack(latents, dim=1) # (batch_size, num_steps + 1, ...) log_probs = torch.stack(log_probs, dim=1) # (batch_size, num_steps, 1) timesteps = self.sd_pipeline.scheduler.timesteps.repeat(batch_size, 1) # (batch_size, num_steps) samples.append( { "prompt_ids": prompt_ids, "prompt_embeds": prompt_embeds, "timesteps": timesteps, "latents": latents[:, :-1], # each entry is the latent before timestep t "next_latents": latents[:, 1:], # each entry is the latent after timestep t "log_probs": log_probs, "negative_prompt_embeds": sample_neg_prompt_embeds, } ) prompt_image_pairs.append([images, prompts, prompt_metadata]) return samples, prompt_image_pairs def _train_batched_samples(self, inner_epoch, epoch, global_step, batched_samples): """ Train on a batch of samples. Main training segment Args: inner_epoch (int): The current inner epoch epoch (int): The current epoch global_step (int): The current global step batched_samples (List[Dict[str, torch.Tensor]]): The batched samples to train on Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. 
Returns: global_step (int): The updated global step """ info = defaultdict(list) for _i, sample in enumerate(batched_samples): if self.config.train_cfg: # concat negative prompts to sample prompts to avoid two forward passes embeds = torch.cat([sample["negative_prompt_embeds"], sample["prompt_embeds"]]) else: embeds = sample["prompt_embeds"] for j in range(self.num_train_timesteps): with self.accelerator.accumulate(self.sd_pipeline.unet): loss, approx_kl, clipfrac = self.calculate_loss( sample["latents"][:, j], sample["timesteps"][:, j], sample["next_latents"][:, j], sample["log_probs"][:, j], sample["advantages"], embeds, ) info["approx_kl"].append(approx_kl) info["clipfrac"].append(clipfrac) info["loss"].append(loss) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_( self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if self.accelerator.sync_gradients: # log training-related stuff info = {k: torch.mean(torch.stack(v)) for k, v in info.items()} info = self.accelerator.reduce(info, reduction="mean") info.update({"epoch": epoch, "inner_epoch": inner_epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) return global_step def _config_check(self) -> Tuple[bool, str]: samples_per_epoch = ( self.config.sample_batch_size * self.accelerator.num_processes * self.config.sample_num_batches_per_epoch ) total_train_batch_size = ( self.config.train_batch_size * self.accelerator.num_processes * self.config.train_gradient_accumulation_steps ) if not self.config.sample_batch_size >= self.config.train_batch_size: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be greater than or equal to the train batch size ({self.config.train_batch_size})", ) if not self.config.sample_batch_size % self.config.train_batch_size == 0: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be divisible by the train batch size ({self.config.train_batch_size})", ) if not samples_per_epoch % total_train_batch_size == 0: return ( False, f"Number of samples per epoch ({samples_per_epoch}) must be divisible by the total train batch size ({total_train_batch_size})", ) return True, "" def train(self, epochs: Optional[int] = None): """ Train the model for a given number of epochs """ global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def create_model_card(self, path: str, model_name: Optional[str] = "TRL DDPO Model") -> None: """Creates and saves a model card for a TRL model. Args: path (`str`): The path to save the model card to. model_name (`str`, *optional*): The name of the model, defaults to `TRL DDPO Model`. """ try: user = whoami()["name"] # handle the offline case except Exception: warnings.warn("Cannot retrieve user information assuming you are running in offline mode.") return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}") with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f: f.write(model_card_content) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card(save_directory)
trl/trl/trainer/ddpo_trainer.py/0
{ "file_path": "trl/trl/trainer/ddpo_trainer.py", "repo_id": "trl", "token_count": 12350 }
427
<!---
Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Official Hugging Face Accelerate Docker Images

Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, and they come with a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate).

A breakdown of each is given below.

## Naming Conventions

Accelerate docker images follow a tagging convention of:

```bash
huggingface/accelerate:{accelerator}-{nightly,release}
```

`accelerator` in this instance is one of many applicable pre-configured backends:

* `gpu`: Comes compiled off of the `nvidia/cuda` image and includes everything such as `deepspeed`, `bitsandbytes`, etc.
* `cpu`: Comes compiled off of `python:3.8-slim` and is designed for non-CUDA based workloads.
* More to come soon

## Nightlies vs Releases

With each release, a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0, for instance, it would look like the following:

```bash
huggingface/accelerate:gpu-release-0.28.0
```

Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` tag which corresponds to a build from a particular date. For instance, here is an example nightly CPU image from 3/14/2024:

```bash
huggingface/accelerate:cpu-nightly-2024-03-14
```

## Running the images

Each image comes compiled with `conda`, and an `accelerate` environment contains all of the installed dependencies.

To pull down the latest nightly, run:

```bash
docker pull huggingface/accelerate:gpu-nightly
```

To then run it in interactive mode with GPU memory available, run:

```bash
docker container run --gpus all -it huggingface/accelerate:gpu-nightly
```

## DEPRECATED IMAGES

CPU and GPU docker images were previously hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates.

The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired ones from above. We will not be deleting these images for posterity, but they will not be receiving updates going forward.
accelerate/docker/README.md/0
{ "file_path": "accelerate/docker/README.md", "repo_id": "accelerate", "token_count": 796 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Gradient Synchronization

PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.
This communication takes time, and ensuring that all processes know each other's states happens at particular trigger points
when using the `ddp` module. These trigger points are added to the PyTorch model, specifically its `forward()`
and `backward()` methods. This happens when the model is wrapped with `DistributedDataParallel`:

```python
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel

model = nn.Linear(10, 10)
ddp_model = DistributedDataParallel(model)
```

In 🤗 Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.

```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()
import torch.nn as nn
- from torch.nn.parallel import DistributedDataParallel

model = nn.Linear(10,10)
+ model = accelerator.prepare(model)
```

## The slowdown in gradient accumulation

You now understand that PyTorch adds hooks to the `forward` and `backward` methods of your PyTorch model when
training in a distributed setup. But how does this risk slowing down your code?

In DDP (distributed data parallel), processes are expected to perform specific operations in a specific order at specific
points, and these must also occur at roughly the same time before moving on.

The most direct example is when you update model parameters through `optimizer.step()`.
Without gradient accumulation, all instances of the model need to have their gradients computed, collated, and applied
before moving on to the next batch of data. When performing gradient accumulation, you accumulate `n` loss gradients and
skip `optimizer.step()` until `n` batches have been reached. As all training processes only need to synchronize by the time
`optimizer.step()` is called, without any modification to your training step, this needless inter-process communication can
cause a significant slowdown.

How can you avoid this overhead?

## Solving the slowdown problem

Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized
until the point where `optimizer.step()` is actually called. PyTorch cannot automagically tell when you need to do this, but it does
provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager
that is added to your model after converting it to DDP.

Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to
`.backward()` outside this context manager will trigger the synchronization.
See an example below:

```python
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

for index, batch in enumerate(dataloader):
    inputs, targets = batch
    # Trigger gradient synchronization on the last batch
    if index != (len(dataloader) - 1):
        with ddp_model.no_sync():
            # Gradients only accumulate
            outputs = ddp_model(inputs)
            loss = loss_func(outputs, targets)
            accelerator.backward(loss)
    else:
        # Gradients finally sync
        outputs = ddp_model(inputs)
        loss = loss_func(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
```

To make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), 🤗 Accelerate replaces `ddp_model.no_sync` with [`~Accelerator.no_sync`], which operates the same way:

```diff
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

for index, batch in enumerate(dataloader):
    inputs, targets = batch
    # Trigger gradient synchronization on the last batch
    if index != (len(dataloader) - 1):
-       with ddp_model.no_sync():
+       with accelerator.no_sync(model):
            # Gradients only accumulate
            outputs = ddp_model(inputs)
            loss = loss_func(outputs, targets)
            accelerator.backward(loss)
    else:
        # Gradients finally sync
        outputs = ddp_model(inputs)
        loss = loss_func(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```

As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the
current batch number, leaving you with the final gradient accumulation API:

```python
ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer)

for batch in dataloader:
    with accelerator.accumulate(model):
        inputs, targets = batch
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```

As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice.

## Just how much of a slowdown is there, and easy mistakes you can make

To set up a realistic example, consider the following setup:

* Two single-GPU T4 nodes and one node with two GPUs
* Each GPU is a T4 and is hosted on GCP
* The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script
* Batch size per GPU is 16, and gradients are accumulated every 4 steps

All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments).

If not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted when these GPUs communicate with each other during unnecessary periods. By how much?
Reference:

- Baseline: uses no synchronization practices discussed here
- `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward`
- `no_sync`: using the `no_sync` pattern properly
- `accumulate`: using [`~Accelerator.accumulate`] properly

Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup:

|             |  Baseline  | `no_sync` improperly | `no_sync`       | `accumulate`    |
| :---------: | :--------: | :------------------: | :-------------: | :-------------: |
| Multi-Node  | 2±0.01s    | 2.13±0.08s           | **0.91±0.11s**  | **0.91±0.11s**  |
| Single Node | 0.50±0.01s | 0.50±0.01s           | **0.41±0.015s** | **0.41±0.015s** |

As you can see, if you are not careful about how you set up your gradient synchronization, you can get more than a 2x slowdown during training!

If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in `gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you.

### `no_sync` requires additional GPU memory when using FSDP

Be aware that not syncing gradients can have adverse effects while performing FSDP training. As warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory.

Therefore, in memory-intensive situations while using FSDP, we recommend setting `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync` (a short sketch is given at the end of this section).

See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to the additional memory overhead of FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`.

| Model        | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) |
| :----------: | :-----------------: | :-----------------: | :---------------------------: |
| mixtral 8x7B | 69G                 | OOM                 | 69G                           |

> [!WARNING]
> Disabling `no_sync` means there _will be slowdown_ due to the extra data syncs, as explained by the earlier sections of this guide.
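For reference, a minimal sketch of this configuration is shown below; the plugin and argument names are taken from the text above, but double-check them against your installed Accelerate version:

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Accumulate gradients over 16 batches, but synchronize (all-reduce) on every batch so
# that FSDP does not need to hold unsynced gradient shards between batches.
# This trades the communication savings of `no_sync` for a lower memory footprint.
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```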
accelerate/docs/source/concept_guides/gradient_synchronization.md/0
{ "file_path": "accelerate/docs/source/concept_guides/gradient_synchronization.md", "repo_id": "accelerate", "token_count": 2842 }
1
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Low Precision Training Methods

🤗 Accelerate provides integrations to train with lower precision methods on supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.

## What training on FP8 means

To explore more of the nitty-gritty of training in FP8 with PyTorch and 🤗 Accelerate, check out the [concept_guide](../concept_guides/low_precision_training.md) on why this can be difficult. But essentially, rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance.

This is only enabled on specific NVIDIA hardware, namely:

* Anything after the 3000 series consumer graphics cards (such as the 4090)
* Hopper-based GPU architectures (such as the `H100` and `H200`)

The result is some reduction in the memory used (as the needed memory is cut in half for some parts of training), and an increase in throughput *should* also be seen for larger models that can replace certain layers with FP8-enabled ones.

## Configuring the Accelerator

Currently two different backends for FP8 are supported (`TransformersEngine` and `MS-AMP`), each with different capabilities and configurations.

To use either, the same core API is used. Just pass `mixed_precision="fp8"` to the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key:

```{python}
from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="fp8")
```

By default, if `MS-AMP` is available in your environment, 🤗 Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`]:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

kwargs = [FP8RecipeKwargs(backend="msamp")]
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
# kwargs = [FP8RecipeKwargs(backend="te")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

## Configuring MS-AMP

Of the two, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level.

Currently two levels of optimization are supported in the 🤗 Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).

* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16-bit.
  This reduces general GPU memory usage and speeds up communication.
* `"O2"` will also cast the first-order optimizer states to 8-bit, while the second-order states stay in FP16 (currently only the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and saves the most memory.

To specify an optimization level, pass it to the [`utils.FP8RecipeKwargs`] by setting the `optimization_level` argument:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

kwargs = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

## Configuring TransformersEngine

TransformersEngine offers many more options for customizing how and which FP8 calculations are performed. A full list of supported arguments and what they mean is available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html); they are also restated as part of [`utils.FP8RecipeKwargs`]'s docstring for your convenience.

🤗 Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.

To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler (a slightly more complete sketch is included at the end of this page):

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

kwargs = [FP8RecipeKwargs(backend="te", ...)]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

## Further Reading

To learn more about training in FP8 please check out the following resources:

* [Our concept guide](../concept_guides/low_precision_training.md), which details more about both TransformersEngine and MS-AMP
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
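As a slightly more complete illustration of the TransformersEngine configuration described above, the sketch below passes a few of the recipe arguments explicitly. The argument names mirror NVIDIA's delayed-scaling recipe and are an assumption to verify against the docstring of [`utils.FP8RecipeKwargs`] in your installed version:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Argument names below follow TransformersEngine's delayed-scaling recipe and may
# differ between library versions -- verify them before relying on this sketch.
kwargs = [
    FP8RecipeKwargs(
        backend="te",
        fp8_format="HYBRID",      # E4M3 in the forward pass, E5M2 in the backward pass
        amax_history_len=32,      # window of history used to compute scaling factors
        amax_compute_algo="max",  # how the amax is derived from that history
    )
]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```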
accelerate/docs/source/usage_guides/low_precision_training.md/0
{ "file_path": "accelerate/docs/source/usage_guides/low_precision_training.md", "repo_id": "accelerate", "token_count": 1463 }
2
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # gradient_accumulation_steps = int(args.gradient_accumulation_steps) # Initialize accelerator accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps ) if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(model): output = model(**batch) loss = output.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) # New Code # parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
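# --- Illustrative note (not used by the script above) ---------------------------
# With `accelerator.accumulate`, the optimizer only steps once every
# `gradient_accumulation_steps` batches, so the effective batch size per optimizer
# step is the per-device batch size times the accumulation steps times the number
# of processes. The defaults below are only examples.
def _effective_batch_size(per_device_batch_size=16, gradient_accumulation_steps=4, num_processes=2):
    # e.g. 16 * 4 * 2 == 128 samples contribute to each optimizer step
    return per_device_batch_size * gradient_accumulation_steps * num_processes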
accelerate/examples/by_feature/gradient_accumulation.py/0
{ "file_path": "accelerate/examples/by_feature/gradient_accumulation.py", "repo_id": "accelerate", "token_count": 3352 }
3
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import importlib import logging import os import subprocess import sys from pathlib import Path import psutil import torch from accelerate.commands.config import default_config_file, load_config_from_file from accelerate.commands.config.config_args import SageMakerConfig from accelerate.commands.config.config_utils import DYNAMO_BACKENDS from accelerate.commands.utils import CustomArgumentParser from accelerate.state import get_int_from_env from accelerate.utils import ( ComputeEnvironment, DistributedType, PrepareForLaunch, _filter_args, check_cuda_p2p_ib_support, convert_dict_to_env_variables, is_bf16_available, is_deepspeed_available, is_mlu_available, is_npu_available, is_rich_available, is_sagemaker_available, is_torch_version, is_torch_xla_available, is_xpu_available, patch_environment, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES if is_rich_available(): from rich import get_console from rich.logging import RichHandler FORMAT = "%(message)s" logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) logger = logging.getLogger(__name__) options_to_group = { "multi_gpu": "Distributed GPUs", "tpu": "TPU", "use_deepspeed": "DeepSpeed Arguments", "use_fsdp": "FSDP Arguments", "use_megatron_lm": "Megatron-LM Arguments", } def clean_option(option): "Finds all cases of - after the first two characters and changes them to _" if option.startswith("--"): return option[2:].replace("-", "_") class CustomHelpFormatter(argparse.HelpFormatter): """ This is a custom help formatter that will hide all arguments that are not used in the command line when the help is called. This is useful for the case where the user is using a specific platform and only wants to see the arguments for that platform. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.titles = [ "Hardware Selection Arguments", "Resource Selection Arguments", "Training Paradigm Arguments", "positional arguments", "optional arguments", ] def add_argument(self, action: argparse.Action): if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: args = sys.argv[2:] else: args = sys.argv[1:] if len(args) > 1: args = list(map(clean_option, args)) used_platforms = [arg for arg in args if arg in options_to_group.keys()] used_titles = [options_to_group[o] for o in used_platforms] if action.container.title not in self.titles + used_titles: action.help = argparse.SUPPRESS elif action.container.title == "Hardware Selection Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" elif action.container.title == "Training Paradigm Arguments": if set(action.option_strings).isdisjoint(set(args)): action.help = argparse.SUPPRESS else: action.help = action.help + " (currently selected)" action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] super().add_argument(action) def end_section(self): if len(self._current_section.items) < 2: self._current_section.items = [] self._current_section.heading = "" super().end_section() def launch_command_parser(subparsers=None): description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" if subparsers is not None: parser = subparsers.add_parser( "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter ) else: parser = CustomArgumentParser( "Accelerate launch command", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter, ) parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") parser.add_argument( "--config_file", default=None, help="The config file to use for the default values in the launching script.", ) parser.add_argument( "--quiet", "-q", action="store_true", help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", ) # Hardware selection arguments hardware_args = parser.add_argument_group( "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." ) hardware_args.add_argument( "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." ) hardware_args.add_argument( "--multi_gpu", default=False, action="store_true", help="Whether or not this should launch a distributed GPU training.", ) hardware_args.add_argument( "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." ) hardware_args.add_argument( "--ipex", default=False, action="store_true", help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", ) # Resource selection arguments resource_args = parser.add_argument_group( "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." ) resource_args.add_argument( "--mixed_precision", type=str, choices=["no", "fp16", "bf16", "fp8"], help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. 
" "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", ) resource_args.add_argument( "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." ) resource_args.add_argument( "--num_machines", type=int, default=None, help="The total number of machines used in this training." ) resource_args.add_argument( "--num_cpu_threads_per_process", type=int, default=None, help="The number of CPU threads per process. Can be tuned for optimal performance.", ) resource_args.add_argument( "--enable_cpu_affinity", default=False, action="store_true", help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", ) # Dynamo arguments resource_args.add_argument( "--dynamo_backend", type=str, choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], help="Choose a backend to optimize your training with dynamo, see more at " "https://github.com/pytorch/torchdynamo.", ) resource_args.add_argument( "--dynamo_mode", type=str, default="default", choices=TORCH_DYNAMO_MODES, help="Choose a mode to optimize your training with dynamo.", ) resource_args.add_argument( "--dynamo_use_fullgraph", default=False, action="store_true", help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs", ) resource_args.add_argument( "--dynamo_use_dynamic", default=False, action="store_true", help="Whether to enable dynamic shape tracing.", ) # Training Paradigm arguments paradigm_args = parser.add_argument_group( "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." ) paradigm_args.add_argument( "--use_deepspeed", default=False, action="store_true", help="Whether to use deepspeed.", ) paradigm_args.add_argument( "--use_fsdp", default=False, action="store_true", help="Whether to use fsdp.", ) paradigm_args.add_argument( "--use_megatron_lm", default=False, action="store_true", help="Whether to use Megatron-LM.", ) paradigm_args.add_argument( "--use_xpu", default=False, action="store_true", help="Whether to use IPEX plugin to speed up training on XPU specifically.", ) # distributed GPU training arguments distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") distributed_args.add_argument( "--gpu_ids", default=None, help="What GPUs (by id) should be used for training on this machine as a comma-seperated list", ) distributed_args.add_argument( "--same_network", default=False, action="store_true", help="Whether all machines used for multinode training exist on the same local network.", ) distributed_args.add_argument( "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." ) distributed_args.add_argument( "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." 
) distributed_args.add_argument( "--main_process_port", type=int, default=None, help="The port to use to communicate with the machine of rank 0.", ) distributed_args.add_argument( "-t", "--tee", default="0", type=str, help="Tee std streams into a log file and also to console.", ) distributed_args.add_argument( "--role", type=str, default="default", help="User-defined role for the workers.", ) # Rendezvous related arguments distributed_args.add_argument( "--rdzv_backend", type=str, default="static", help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", ) distributed_args.add_argument( "--rdzv_conf", type=str, default="", help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", ) distributed_args.add_argument( "--max_restarts", type=int, default=0, help="Maximum number of worker group restarts before failing.", ) distributed_args.add_argument( "--monitor_interval", type=float, default=5, help="Interval, in seconds, to monitor the state of workers.", ) parser.add_argument( "-m", "--module", action="store_true", help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", ) parser.add_argument( "--no_python", action="store_true", help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", ) # TPU arguments tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") tpu_args.add_argument( "--tpu_cluster", action="store_true", dest="tpu_use_cluster", help="Whether to use a GCP TPU pod for training.", ) tpu_args.add_argument( "--no_tpu_cluster", action="store_false", dest="tpu_use_cluster", help="Should not be passed explicitly, this is for internal use only.", ) tpu_args.add_argument( "--tpu_use_sudo", action="store_true", help="Whether to use `sudo` when running the TPU training script in each pod.", ) tpu_args.add_argument( "--vm", type=str, action="append", help=( "List of single Compute VM instance names. " "If not provided we assume usage of instance groups. For TPU pods." ), ) tpu_args.add_argument( "--env", type=str, action="append", help="List of environment variables to set on the Compute VM instances. For TPU pods.", ) tpu_args.add_argument( "--main_training_function", type=str, default=None, help="The name of the main function to be executed in your script (only for TPU training).", ) tpu_args.add_argument( "--downcast_bf16", action="store_true", help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.", ) # DeepSpeed arguments deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") deepspeed_args.add_argument( "--deepspeed_config_file", default=None, type=str, help="DeepSpeed config file.", ) deepspeed_args.add_argument( "--zero_stage", default=None, type=int, help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `2`.", ) deepspeed_args.add_argument( "--offload_optimizer_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_device", default=None, type=str, help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). 
" "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_optimizer_nvme_path", default=None, type=str, help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--offload_param_nvme_path", default=None, type=str, help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to 'none'.", ) deepspeed_args.add_argument( "--gradient_accumulation_steps", default=None, type=int, help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1`.", ) deepspeed_args.add_argument( "--gradient_clipping", default=None, type=float, help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " "If unspecified, will default to `1.0`.", ) deepspeed_args.add_argument( "--zero3_init_flag", default=None, type=str, help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", ) deepspeed_args.add_argument( "--zero3_save_16bit_model", default=None, type=str, help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", ) deepspeed_args.add_argument( "--deepspeed_hostfile", default=None, type=str, help="DeepSpeed hostfile for configuring multi-node compute resources.", ) deepspeed_args.add_argument( "--deepspeed_exclusion_filter", default=None, type=str, help="DeepSpeed exclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_inclusion_filter", default=None, type=str, help="DeepSpeed inclusion filter string when using mutli-node setup.", ) deepspeed_args.add_argument( "--deepspeed_multinode_launcher", default=None, type=str, help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", ) # fsdp arguments fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") fsdp_args.add_argument( "--fsdp_offload_params", default="false", type=str, help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_min_num_params", type=int, default=1e8, help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sharding_strategy", type=str, default="FULL_SHARD", help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_auto_wrap_policy", type=str, default=None, help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_transformer_layer_cls_to_wrap", default=None, type=str, help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_backward_prefetch_policy", default=None, type=str, help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. 
Use `fsdp_backward_prefetch` instead.", ) fsdp_args.add_argument( "--fsdp_backward_prefetch", default=None, type=str, help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_state_dict_type", default=None, type=str, help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_forward_prefetch", default="false", type=str, help="If True, then FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_use_orig_params", default="true", type=str, help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres." " (useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_cpu_ram_efficient_loading", default="true", type=str, help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. " "(useful only when `use_fsdp` flag is passed).", ) fsdp_args.add_argument( "--fsdp_sync_module_states", default="true", type=str, help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." " (useful only when `use_fsdp` flag is passed).", ) # megatron_lm args megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") megatron_lm_args.add_argument( "--megatron_lm_tp_degree", type=int, default=1, help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_pp_degree", type=int, default=1, help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_num_micro_batches", type=int, default=None, help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_sequence_parallelism", default=None, type=str, help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_recompute_activations", default=None, type=str, help="Decides Whether (true|false) to enable Selective Activation Recomputation. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_use_distributed_optimizer", default=None, type=str, help="Decides Whether (true|false) to use distributed optimizer " "which shards optimizer state and gradients across Data Pralellel (DP) ranks. " "(useful only when `use_megatron_lm` flag is passed).", ) megatron_lm_args.add_argument( "--megatron_lm_gradient_clipping", default=1.0, type=float, help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
" "(useful only when `use_megatron_lm` flag is passed).", ) # AWS arguments aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") aws_args.add_argument( "--aws_access_key_id", type=str, default=None, help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", ) aws_args.add_argument( "--aws_secret_access_key", type=str, default=None, help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", ) parser.add_argument( "--debug", action="store_true", help="Whether to print out the torch.distributed stack trace when something fails.", ) parser.add_argument( "training_script", type=str, help=( "The full path to the script to be launched in parallel, followed by all the arguments for the training " "script." ), ) # MPI arguments mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") mpirun_args.add_argument( "--mpirun_hostfile", type=str, default=None, help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will " "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", ) mpirun_args.add_argument( "--mpirun_ccl", type=int, default=1, help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", ) # Other arguments of the training scripts parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") if subparsers is not None: parser.set_defaults(func=launch_command) return parser def simple_launcher(args): cmd, current_env = prepare_simple_launcher_cmd_env(args) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) def multi_gpu_launcher(args): import torch.distributed.run as distrib_run current_env = prepare_multi_gpu_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def deepspeed_launcher(args): import torch.distributed.run as distrib_run if not is_deepspeed_available(): raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") cmd, current_env = prepare_deepspeed_cmd_env(args) if not check_cuda_p2p_ib_support(): message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." 
warn = False if "NCCL_P2P_DISABLE" not in current_env: current_env["NCCL_P2P_DISABLE"] = "1" warn = True if "NCCL_IB_DISABLE" not in current_env: current_env["NCCL_IB_DISABLE"] = "1" warn = True if warn: logger.warning(message) if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: with open(".deepspeed_env", "a") as f: valid_env_items = convert_dict_to_env_variables(current_env) if len(valid_env_items) > 1: f.writelines(valid_env_items) process = subprocess.Popen(cmd, env=current_env) process.wait() if process.returncode != 0: if not args.quiet: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) else: sys.exit(1) else: debug = getattr(args, "debug", False) args = _filter_args( args, distrib_run.get_args_parser(), ["--training_script", args.training_script, "--training_script_args", args.training_script_args], ) with patch_environment(**current_env): try: distrib_run.run(args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def tpu_launcher(args): import torch_xla.distributed.xla_multiprocessing as xmp if args.no_python: raise ValueError("--no_python cannot be used with TPU launcher") args, current_env = prepare_tpu(args, {}) if args.module: mod_name = args.training_script else: # Import training_script as a module script_path = Path(args.training_script) sys.path.append(str(script_path.parent.resolve())) mod_name = script_path.stem mod = importlib.import_module(mod_name) if not hasattr(mod, args.main_training_function): raise ValueError( f"Your training script should have a function named {args.main_training_function}, or you should pass a " "different value to `--main_training_function`." 
) # Patch sys.argv sys.argv = [mod.__file__] + args.training_script_args main_function = getattr(mod, args.main_training_function) with patch_environment(**current_env): xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) def tpu_pod_launcher(args): from torch_xla.distributed import xla_dist current_env = {} args, current_env = prepare_tpu(args, current_env, True) debug = getattr(args, "debug", False) training_script = args.training_script training_script_args = args.training_script_args new_args = _filter_args( args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] ) if args.tpu_use_sudo: new_cmd = ["sudo"] else: new_cmd = [] new_cmd += [ "accelerate-launch", "--tpu", "--no_tpu_cluster", "--num_machines", "1", "--mixed_precision", "no", "--dynamo_backend", "no", "--num_processes", str(args.num_processes), "--main_training_function", str(args.main_training_function), training_script, ] + training_script_args new_args.positional = new_cmd bad_flags = "" for arg in vars(new_args): if arg.startswith("docker_"): value = getattr(new_args, arg) if value != "" and value is not None: bad_flags += f'{arg}="{value}"\n' if bad_flags != "": raise ValueError( f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" ) new_args.env = [f"{k}={v}" for k, v in current_env.items()] new_args.env.append("ACCELERATE_IN_TPU_POD=1") try: xla_dist.resolve_and_execute(new_args) except Exception: if is_rich_available() and debug: console = get_console() console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") console.print_exception(suppress=[__file__], show_locals=False) else: raise def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): if not is_sagemaker_available(): raise ImportError( "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" ) if args.module or args.no_python: raise ValueError( "SageMaker requires a python training script file and cannot be used with --module or --no_python" ) from sagemaker.huggingface import HuggingFace args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) huggingface_estimator = HuggingFace(**args) huggingface_estimator.fit(inputs=sagemaker_inputs) print(f"You can find your model data at: {huggingface_estimator.model_data}") def _validate_launch_command(args): # Sanity checks if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: raise ValueError( "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." ) if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") defaults = None warned = [] mp_from_config_flag = False # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: defaults = load_config_from_file(args.config_file) if ( not args.multi_gpu and not args.tpu and not args.tpu_use_cluster and not args.use_deepspeed and not args.use_fsdp and not args.use_megatron_lm ): args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED args.multi_gpu = ( True if defaults.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_XPU, ) else False ) args.tpu = defaults.distributed_type == DistributedType.XLA args.use_fsdp = defaults.distributed_type == DistributedType.FSDP args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False if args.gpu_ids is None: if defaults.gpu_ids is not None: args.gpu_ids = defaults.gpu_ids else: args.gpu_ids = "all" if args.multi_gpu and args.num_machines is None: args.num_machines = defaults.num_machines if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: raise ValueError( "Less than two GPU ids were configured and tried to run on on multiple GPUs. " "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." ) if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: # Update args with the defaults for name, attr in defaults.__dict__.items(): if isinstance(attr, dict): for k in defaults.deepspeed_config: setattr(args, k, defaults.deepspeed_config[k]) for k in defaults.fsdp_config: arg_to_set = k if "fsdp" not in arg_to_set: arg_to_set = "fsdp_" + arg_to_set setattr(args, arg_to_set, defaults.fsdp_config[k]) for k in defaults.megatron_lm_config: setattr(args, k, defaults.megatron_lm_config[k]) for k in defaults.dynamo_config: setattr(args, k, defaults.dynamo_config[k]) for k in defaults.ipex_config: setattr(args, k, defaults.ipex_config[k]) for k in defaults.mpirun_config: setattr(args, k, defaults.mpirun_config[k]) continue # Those args are handled separately if ( name not in ["compute_environment", "mixed_precision", "distributed_type"] and getattr(args, name, None) is None ): setattr(args, name, attr) if not args.debug: args.debug = defaults.debug if not args.mixed_precision: if defaults.mixed_precision is None: args.mixed_precision = "no" else: args.mixed_precision = defaults.mixed_precision mp_from_config_flag = True else: if args.use_cpu or (args.use_xpu and torch.xpu.is_available()): native_amp = is_torch_version(">=", "1.10") else: native_amp = is_bf16_available(True) if ( args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) ): raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") # Silently set the default here if args.dynamo_backend is None: args.dynamo_backend = "no" else: if args.num_processes is None: if args.use_xpu and is_xpu_available(): args.num_processes = torch.xpu.device_count() elif is_mlu_available(): args.num_processes = torch.mlu.device_count() elif is_npu_available(): args.num_processes = torch.npu.device_count() else: args.num_processes = torch.cuda.device_count() warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`") if args.debug is None: args.debug = False if not args.multi_gpu and ( (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1) or (is_mlu_available() and torch.mlu.device_count() > 1) or 
(is_npu_available() and torch.npu.device_count() > 1) or (torch.cuda.device_count() > 1) ): warned.append( "\t\tMore than one GPU was found, enabling multi-GPU training.\n" "\t\tIf this was unintended please pass in `--num_processes=1`." ) args.multi_gpu = True if args.num_machines is None: warned.append("\t`--num_machines` was set to a value of `1`") args.num_machines = 1 if args.mixed_precision is None: warned.append("\t`--mixed_precision` was set to a value of `'no'`") args.mixed_precision = "no" if not hasattr(args, "use_cpu"): args.use_cpu = args.cpu if args.dynamo_backend is None: warned.append("\t`--dynamo_backend` was set to a value of `'no'`") args.dynamo_backend = "no" if args.debug: logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") is_aws_env_disabled = defaults is None or ( defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER ) if is_aws_env_disabled and args.num_cpu_threads_per_process is None: args.num_cpu_threads_per_process = 1 if args.use_cpu and args.num_processes >= 1: local_size = get_int_from_env( ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 ) threads_per_process = int(psutil.cpu_count(logical=False) / local_size) if threads_per_process > 1: args.num_cpu_threads_per_process = threads_per_process warned.append( f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" ) if any(warned): message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" message += "\n".join(warned) message += ( "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." ) logger.warning(message) return args, defaults, mp_from_config_flag def launch_command(args): args, defaults, mp_from_config_flag = _validate_launch_command(args) # Use the proper launcher if args.use_deepspeed and not args.cpu: args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] if mp_from_config_flag: args.deepspeed_fields_from_accelerate_config.append("mixed_precision") args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) deepspeed_launcher(args) elif args.use_fsdp and not args.cpu: multi_gpu_launcher(args) elif args.use_megatron_lm and not args.cpu: multi_gpu_launcher(args) elif args.multi_gpu and not args.cpu: multi_gpu_launcher(args) elif args.tpu and not args.cpu: if args.tpu_use_cluster: tpu_pod_launcher(args) else: tpu_launcher(args) elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: sagemaker_launcher(defaults, args) else: simple_launcher(args) def main(): parser = launch_command_parser() args = parser.parse_args() launch_command(args) if __name__ == "__main__": main()
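# --- Illustrative sketch (not part of the launcher itself) ----------------------
# `clean_option` above normalizes `--multi-gpu` style flags so they can be matched
# against the keys of `options_to_group` when deciding which help sections to show.
# The helper below simply mirrors that logic for demonstration.
def _clean_option_demo(option):
    # Strip the leading "--" and turn remaining hyphens into underscores.
    if option.startswith("--"):
        return option[2:].replace("-", "_")


assert _clean_option_demo("--multi-gpu") == "multi_gpu"
assert _clean_option_demo("--use-deepspeed") == "use_deepspeed"
assert _clean_option_demo("positional_arg") is None  # non-flag arguments fall through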
accelerate/src/accelerate/commands/launch.py/0
{ "file_path": "accelerate/src/accelerate/commands/launch.py", "repo_id": "accelerate", "token_count": 17932 }
4
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.test_utils.testing import assert_exception from accelerate.utils.dataclasses import DistributedType from accelerate.utils.operations import ( DistributedOperationException, broadcast, copy_tensor_to_devices, gather, gather_object, pad_across_processes, reduce, ) def create_tensor(state): return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) def test_gather(state): tensor = create_tensor(state) gathered_tensor = gather(tensor) assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1)) def test_gather_object(state): # Gather objects in TorchXLA is not supported. if state.distributed_type == DistributedType.XLA: return obj = [state.process_index] gathered_obj = gather_object(obj) assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}" assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}" def test_gather_non_contigous(state): # Skip this test because the 'is_contiguous' function of XLA tensor always returns True. if state.distributed_type == DistributedType.XLA: return # Create a non-contiguous tensor tensor = torch.arange(12).view(4, 3).t().to(state.device) assert not tensor.is_contiguous() # Shouldn't error out _ = gather(tensor) def test_broadcast(state): tensor = create_tensor(state) broadcasted_tensor = broadcast(tensor) assert broadcasted_tensor.shape == torch.Size([state.num_processes]) assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1)) def test_pad_across_processes(state): # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: tensor = torch.arange(state.num_processes + 1).to(state.device) else: tensor = torch.arange(state.num_processes).to(state.device) padded_tensor = pad_across_processes(tensor) assert padded_tensor.shape == torch.Size([state.num_processes + 1]) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0] def test_reduce_sum(state): # For now runs on only two processes if state.num_processes != 2: return tensor = create_tensor(state) reduced_tensor = reduce(tensor, "sum") truth_tensor = torch.tensor([4.0, 6]).to(state.device) assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" def test_reduce_mean(state): # For now runs on only two processes if state.num_processes != 2: return tensor = create_tensor(state) reduced_tensor = reduce(tensor, "mean") truth_tensor = torch.tensor([2.0, 3]).to(state.device) assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" def test_op_checker(state): # Must be in a distributed state, and gathering is currently not supported in TorchXLA. 
if state.distributed_type in [DistributedType.NO, DistributedType.XLA]: return state.debug = True # `pad_across_processes` if state.process_index == 0: data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} else: data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)} with assert_exception(DistributedOperationException): pad_across_processes(data, dim=0) # `reduce` if state.process_index == 0: data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} else: data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} with assert_exception(DistributedOperationException): reduce(data) # `broadcast` if state.process_index == 0: data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} else: data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} with assert_exception(DistributedOperationException): broadcast(data) state.debug = False def test_copy_tensor_to_devices(state): if state.distributed_type not in [DistributedType.MULTI_GPU, DistributedType.XLA]: return if state.is_main_process: tensor = torch.tensor([1, 2, 3], dtype=torch.int).to(state.device) else: tensor = None tensor = copy_tensor_to_devices(tensor) assert torch.allclose(tensor, torch.tensor([1, 2, 3], dtype=torch.int, device=state.device)) def _mp_fn(index): # For xla_spawn (TPUs) main() def main(): state = PartialState() state.print(f"State: {state}") state.print("testing gather") test_gather(state) state.print("testing gather_object") test_gather_object(state) state.print("testing gather non-contigous") test_gather_non_contigous(state) state.print("testing broadcast") test_broadcast(state) state.print("testing pad_across_processes") test_pad_across_processes(state) state.print("testing reduce_sum") test_reduce_sum(state) state.print("testing reduce_mean") test_reduce_mean(state) state.print("testing op_checker") test_op_checker(state) state.print("testing sending tensors across devices") test_copy_tensor_to_devices(state) if __name__ == "__main__": main()
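# --- Illustrative sketch (not executed by the test suite) -----------------------
# For intuition about what `test_gather` asserts with two processes, this is a
# single-process simulation of the values each rank contributes; `torch.cat`
# stands in for the collective `gather`.
def _simulate_gather(num_processes=2):
    per_rank = [
        torch.arange(num_processes) + 1.0 + num_processes * rank  # mirrors create_tensor
        for rank in range(num_processes)
    ]
    # rank 0 holds tensor([1., 2.]), rank 1 holds tensor([3., 4.])
    gathered = torch.cat(per_rank)  # what `gather` would return on every rank
    assert gathered.tolist() == list(range(1, num_processes**2 + 1))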
accelerate/src/accelerate/test_utils/scripts/test_ops.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_ops.py", "repo_id": "accelerate", "token_count": 2347 }
5
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A collection of utilities for ensuring that training can always occur. Heavily influenced by the [toma](https://github.com/BlackHC/toma) library. """ import functools import gc import inspect import torch from .imports import is_mlu_available, is_mps_available, is_npu_available, is_xpu_available def release_memory(*objects): """ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ``` """ if not isinstance(objects, list): objects = list(objects) for i in range(len(objects)): objects[i] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() elif is_mps_available(): torch.mps.empty_cache() else: torch.cuda.empty_cache() return objects def should_reduce_batch_size(exception: Exception) -> bool: """ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory Args: exception (`Exception`): An exception """ _statements = [ "CUDA out of memory.", # CUDA OOM "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU "DefaultCPUAllocator: can't allocate memory", # CPU OOM ] if isinstance(exception, RuntimeError) and len(exception.args) == 1: return any(err in exception.args[0] for err in _statements) return False def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128): """ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... 
>>> train(model, optimizer) ``` """ if function is None: return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) batch_size = starting_batch_size def decorator(*args, **kwargs): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() params = list(inspect.signature(function).parameters.keys()) # Guard against user error if len(params) < (len(args) + 1): arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) raise TypeError( f"Batch size was passed into `{function.__name__}` as the first argument when called." f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("No executable batch size found, reached zero.") try: return function(batch_size, *args, **kwargs) except Exception as e: if should_reduce_batch_size(e): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
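# --- Illustrative usage sketch (not part of the library module) -----------------
if __name__ == "__main__":
    # A fake training function that "runs out of memory" above a made-up threshold,
    # showing how the decorator halves the batch size and retries.
    @find_executable_batch_size(starting_batch_size=256)
    def _toy_train(batch_size):
        if batch_size > 64:  # pretend anything larger than 64 does not fit on the GPU
            raise RuntimeError("CUDA out of memory.")  # matches `should_reduce_batch_size`
        return batch_size

    print(_toy_train())  # tries 256 -> 128 -> 64 and prints 64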
accelerate/src/accelerate/utils/memory.py/0
{ "file_path": "accelerate/src/accelerate/utils/memory.py", "repo_id": "accelerate", "token_count": 2210 }
6
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import os import unittest from collections import OrderedDict from tempfile import TemporaryDirectory import torch import torch.nn as nn from transformers import AutoModelForCausalLM, AutoTokenizer from accelerate.big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from accelerate.hooks import remove_hook_from_submodules from accelerate.test_utils import ( require_bnb, require_cuda, require_mps, require_multi_gpu, require_non_torch_xla, slow, ) from accelerate.utils import is_torch_version, offload_state_dict class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class LinearWithNonPersistentBuffers(nn.Module): def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer("weight", torch.ones((out_features, in_features), **factory_kwargs)) if bias: self.register_buffer("bias", torch.ones(out_features, **factory_kwargs), persistent=False) else: self.register_buffer("bias", None) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.linear(input, self.weight, self.bias) class ModelForTestNonPersistentBuffers(nn.Module): def __init__(self): super().__init__() self.linear1 = LinearWithNonPersistentBuffers(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = LinearWithNonPersistentBuffers(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class ModelForTestCopy(nn.Module): def __init__(self, id: int): super().__init__() self.id = id self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))), self.id class ModelForTestTiedWeights(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(4, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 4) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class BiggerModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) self.batchnorm = nn.BatchNorm1d(5) self.linear3 = nn.Linear(5, 6) self.linear4 = nn.Linear(6, 5) def forward(self, x): return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) # To test preload_module_classes class ModuleWithUnusedSubModules(nn.Module): def __init__(self, input_dim, output_dim): super().__init__() self.linear = nn.Linear(input_dim, output_dim) def forward(self, x): return x @ self.linear.weight.t() + 
self.linear.bias class ModelWithUnusedSubModulesForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = ModuleWithUnusedSubModules(3, 4) self.linear2 = ModuleWithUnusedSubModules(4, 5) self.batchnorm = nn.BatchNorm1d(5) self.linear3 = ModuleWithUnusedSubModules(5, 6) self.linear4 = ModuleWithUnusedSubModules(6, 5) def forward(self, x): return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) class BigModelingTester(unittest.TestCase): def test_init_empty_weights(self): # base use with init_empty_weights(): module = nn.Linear(4, 5) assert module.weight.device == torch.device("meta") # base use with buffers, they are not touched with init_empty_weights(): module = nn.BatchNorm1d(4) assert module.weight.device == torch.device("meta") assert module.running_mean.device == torch.device("cpu") # Use with include_buffers=True register_parameter_func = nn.Module.register_parameter register_buffer_func = nn.Module.register_buffer with init_empty_weights(include_buffers=True): module = nn.BatchNorm1d(4) # nn.Module.register_parameter/buffer shouldn't be changed with torch >= 2.0 if is_torch_version(">=", "2.0"): assert register_parameter_func == nn.Module.register_parameter assert register_buffer_func == nn.Module.register_buffer assert module.weight.device == torch.device("meta") assert module.running_mean.device == torch.device("meta") # Double check we didn't break PyTorch module = nn.BatchNorm1d(4) assert module.weight.device == torch.device("cpu") assert module.running_mean.device == torch.device("cpu") def test_init_empty_weights_very_large_model(self): # This is a 100 billion parameters model. with init_empty_weights(): _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) @require_cuda def test_init_on_device_cuda(self): device = torch.device("cuda:0") with init_on_device(device): model = nn.Linear(10, 10) assert model.weight.device == device assert model.weight.device == device @require_mps def test_init_on_device_mps(self): device = torch.device("mps:0") with init_on_device(device): model = nn.Linear(10, 10) assert model.weight.device == device assert model.weight.device == device def test_cpu_offload(self): model = ModelForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") cpu_offload(model, execution_device=device) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" # Clean up for next test. remove_hook_from_submodules(model) cpu_offload(model, execution_device=device, offload_buffers=True) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" def test_cpu_offload_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") cpu_offload(model, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"]) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" # Clean up for next test. 
remove_hook_from_submodules(model) cpu_offload( model, execution_device=device, offload_buffers=True, preload_module_classes=["ModuleWithUnusedSubModules"], ) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" @slow @require_cuda def test_cpu_offload_gpt2(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") cpu_offload(gpt2, execution_device=0) outputs = gpt2.generate(inputs["input_ids"]) assert ( tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo" ) def test_disk_offload(self): model = ModelForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") with TemporaryDirectory() as tmp_dir: disk_offload(model, tmp_dir, execution_device=device) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" # Clean up for next test. remove_hook_from_submodules(model) with TemporaryDirectory() as tmp_dir: disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" def test_disk_offload_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() x = torch.randn(2, 3) expected = model(x) device = torch.device(0 if torch.cuda.is_available() else "cpu") with TemporaryDirectory() as tmp_dir: disk_offload( model, tmp_dir, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" # Clean up for next test. remove_hook_from_submodules(model) with TemporaryDirectory() as tmp_dir: disk_offload( model, tmp_dir, execution_device=device, offload_buffers=True, preload_module_classes=["ModuleWithUnusedSubModules"], ) output = model(x) assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}" @slow @require_cuda def test_disk_offload_gpt2(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") with TemporaryDirectory() as tmp_dir: disk_offload(gpt2, tmp_dir, execution_device=0) outputs = gpt2.generate(inputs["input_ids"]) assert ( tokenizer.decode(outputs[0].tolist()) == "Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo" ) @require_cuda def test_dispatch_model(self): model = ModelForTest() device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_with_non_persistent_buffers(self): model = ModelForTestNonPersistentBuffers() device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir, offload_buffers=True) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_mps def test_dispatch_model_mps(self): model = ModelForTest() device_map = {"linear1": "mps", "batchnorm": "disk", "linear2": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_tied_weights(self): model = ModelForTestTiedWeights() model.linear1.weight = model.linear2.weight device_map = {"linear1": 0, "batchnorm": 0, "linear2": 0} dispatch_model(model, device_map) assert model.linear2.weight is model.linear1.weight @require_multi_gpu def test_dispatch_model_tied_weights_memory(self): # Test that we do not duplicate tied weights at any point during dispatch_model call. torch.cuda.empty_cache() # Needed in case we run several tests in a row. model = nn.Sequential( OrderedDict( [ ("linear0", nn.Linear(5000, 5000, bias=False)), ("linear1", nn.Linear(5000, 5000, bias=False)), ("linear2", nn.Linear(5000, 5000, bias=False)), ("linear3", nn.Linear(5000, 5000, bias=False)), ("linear4", nn.Linear(5000, 5000, bias=False)), ] ) ) model.linear2.weight = model.linear0.weight model.linear3.weight = model.linear0.weight model.linear4.weight = model.linear0.weight x = torch.randn(5, 5000) with torch.no_grad(): expected = model(x) # We should need only 5000 * 5000 * 32 // 8 * 1e-6 = 100 MB on the device 0 for the four linear weights. device_map = {"linear0": 0, "linear1": 1, "linear2": 0, "linear3": 0, "linear4": 0} # Just to intialize CUDA context. a = torch.rand(5).to("cuda:0") # noqa: F841 free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0] required_memory_bytes = 5000 * 5000 * (32 // 8) # Leaving 50 MB of free memory for possible buffers, etc. n_vals = (free_memory_bytes - required_memory_bytes - int(50e6)) // (32 // 8) foo = torch.rand(n_vals, device="cuda:0") # noqa: F841 # If this does OOM: there is an issue in somewhere in dispatch_model, memory of tied weights is duplicated. try: dispatch_model(model, device_map) except torch.cuda.OutOfMemoryError as e: raise torch.cuda.OutOfMemoryError( f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory. {e}" ) except Exception as e: raise e with torch.no_grad(): output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_tied_weights_memory_with_nested_offload_cpu(self): # Test that we do not duplicate tied weights at any point during dispatch_model call. torch.cuda.empty_cache() # Needed in case we run several tests in a row. 
class SubModule(torch.nn.Module): def __init__(self, ref_to_parameter): super().__init__() self.parameter = ref_to_parameter def forward(self, x): return x + torch.max(self.parameter) class LinearModuleAndSubModule(torch.nn.Linear): def __init__(self, in_features, out_features): super().__init__(in_features, out_features, bias=False) self.weight_submodule = SubModule(self.weight) self.weight_submodule2 = SubModule(self.weight) self.weight_submodule3 = SubModule(self.weight) self.weight_submodule4 = SubModule(self.weight) def forward(self, x): a = torch.nn.functional.linear(self.weight_submodule(x), self.weight) b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight) c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight) d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight) return a + b + c + d class ModelWithSubmodules(torch.nn.Module): def __init__(self): super().__init__() self.compute = LinearModuleAndSubModule(5000, 5000) self.compute1 = LinearModuleAndSubModule(5000, 5000) def forward(self, x): a = self.compute(x) b = self.compute1(x) return a + b # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on the device 0 for the whole model forward, and not 600 MB. device_map = {"compute": 0, "compute1": "cpu"} model = ModelWithSubmodules() x = torch.randn(1, 5000) with torch.no_grad(): expected = model(x) # Just to intialize CUDA context. a = torch.rand(5).to("cuda:0") # noqa: F841 free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0] required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB # Leaving 150 MB of free memory for possible buffers, etc. n_vals = (free_memory_bytes - required_memory_bytes - int(150e6)) // (32 // 8) foo = torch.rand(n_vals, device="cuda:0") # noqa: F841 free_memory_bytes_before_dispatch = torch.cuda.mem_get_info("cuda:0")[0] dispatch_model(model, device_map) free_memory_bytes_after_dispatch = torch.cuda.mem_get_info("cuda:0")[0] assert (free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130 original_pointer = model.compute1._hf_hook.weights_map["weight"].data_ptr() with torch.no_grad(): try: output = model(x) except torch.cuda.OutOfMemoryError as e: raise torch.cuda.OutOfMemoryError( f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_cpu. {e}" ) except Exception as e: raise e assert torch.allclose(expected, output.cpu(), atol=1e-5) torch.cuda.empty_cache() free_memory_bytes_after_infer = torch.cuda.mem_get_info("cuda:0")[0] # Check that we have no more references on GPU for the offloaded tied weight. assert len(model.compute1.weight_submodule._hf_hook.tied_params_map[original_pointer]) == 0 assert len(model.compute1._hf_hook.tied_params_map[original_pointer]) == 0 assert (free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130 # Test is flacky otherwise. del model gc.collect() @require_cuda def test_dispatch_model_tied_weights_memory_with_nested_offload_disk(self): # Test that we do not duplicate tied weights at any point during dispatch_model call. torch.cuda.empty_cache() # Needed in case we run several tests in a row. 
class SubModule(torch.nn.Module): def __init__(self, ref_to_parameter): super().__init__() self.parameter = ref_to_parameter def forward(self, x): return x + torch.max(self.parameter) class LinearModuleAndSubModule(torch.nn.Linear): def __init__(self, in_features, out_features): super().__init__(in_features, out_features, bias=False) self.weight_submodule = SubModule(self.weight) self.weight_submodule2 = SubModule(self.weight) self.weight_submodule3 = SubModule(self.weight) self.weight_submodule4 = SubModule(self.weight) def forward(self, x): a = torch.nn.functional.linear(self.weight_submodule(x), self.weight) b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight) c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight) d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight) return a + b + c + d class ModelWithSubmodules(torch.nn.Module): def __init__(self): super().__init__() self.compute = LinearModuleAndSubModule(5000, 5000) self.compute1 = LinearModuleAndSubModule(5000, 5000) def forward(self, x): a = self.compute(x) b = self.compute1(x) return a + b # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on the device 0 for the whole model forward, and not 600 MB. device_map = {"compute": 0, "compute1": "disk"} model = ModelWithSubmodules() x = torch.randn(1, 5000) with torch.no_grad(): expected = model(x) # Just to intialize CUDA context. a = torch.rand(5).to("cuda:0") # noqa: F841 free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0] required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB # Leaving 150 MB of free memory for possible buffers, etc. n_vals = (free_memory_bytes - required_memory_bytes - int(200e6)) // (32 // 8) foo = torch.rand(n_vals, device="cuda:0") # noqa: F841 free_memory_bytes_before_dispatch = torch.cuda.mem_get_info("cuda:0")[0] with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) free_memory_bytes_after_dispatch = torch.cuda.mem_get_info("cuda:0")[0] assert (free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130 with torch.no_grad(): try: output = model(x) except torch.cuda.OutOfMemoryError as e: raise torch.cuda.OutOfMemoryError( f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_disk. {e}" ) except Exception as e: raise e assert torch.allclose(expected, output.cpu(), atol=1e-5) torch.cuda.empty_cache() free_memory_bytes_after_infer = torch.cuda.mem_get_info("cuda:0")[0] # Check that we have no more references on GPU for the offloaded tied weight. n_non_empty = 0 for pointer, pointer_dict in model.compute1.weight_submodule._hf_hook.tied_params_map.items(): if len(pointer_dict) > 0: n_non_empty += 1 assert n_non_empty == 1 # `compute` layer one. n_non_empty = 0 for pointer, pointer_dict in model.compute1._hf_hook.tied_params_map.items(): if len(pointer_dict) > 0: n_non_empty += 1 assert n_non_empty == 1 # `compute` layer one. 
assert (free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130 @require_multi_gpu def test_dispatch_model_multi_gpu(self): model = BiggerModelForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_copy(self): original_model = ModelForTestCopy(id=1) device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected, original_output_id = original_model(x) dispatch_model(original_model, device_map) copied_model = copy.deepcopy(original_model) copied_model.id = 2 output, copied_output_id = copied_model(x) assert original_model.id == original_output_id assert copied_model.id == copied_output_id assert copied_model.linear1.forward is not original_model.linear1.forward assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_move_offloaded_model(self): model = ModelForTest() device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0} with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) with self.assertRaises(RuntimeError): model.to(0) @require_multi_gpu def test_dispatch_model_move_model_warning(self): model = ModelForTest() device_map = {"linear1": 0, "batchnorm": 0, "linear2": 1} with TemporaryDirectory() as tmp_dir: dispatch_model(model, device_map, offload_dir=tmp_dir) with self.assertLogs("accelerate.big_modeling", level="WARNING"): model.to("cpu") with self.assertLogs("accelerate.big_modeling", level="WARNING"): model.cuda(0) with self.assertRaises(RuntimeError): x = torch.randn(2, 3) model(x) @slow @require_multi_gpu def test_dispatch_model_gpt2_on_two_gpus(self): tokenizer = AutoTokenizer.from_pretrained("gpt2") inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") # Dispatch on GPUs 0 and 1 device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.ln_f": 1, "lm_head": 0, } for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 gpt2 = dispatch_model(gpt2, device_map) outputs = gpt2.generate(inputs["input_ids"]) assert ( tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo" ) # Dispatch with a bit of CPU offload gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") for i in range(4): device_map[f"transformer.h.{i}"] = "cpu" gpt2 = dispatch_model(gpt2, device_map) outputs = gpt2.generate(inputs["input_ids"]) assert ( tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo" ) # Dispatch with a bit of CPU and disk offload gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") for i in range(2): device_map[f"transformer.h.{i}"] = "disk" with TemporaryDirectory() as tmp_dir: state_dict = { k: p for k, p in gpt2.state_dict().items() if "transformer.h.0" in k or "transformer.h.1" in k } offload_state_dict(tmp_dir, state_dict) gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir) outputs = gpt2.generate(inputs["input_ids"]) assert ( tokenizer.decode(outputs[0].tolist()) == "Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo" ) @require_cuda def test_dispatch_model_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_mps def test_dispatch_model_with_unused_submodules_mps(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "mps", "linear2": "mps", "batchnorm": "mps", "linear3": "mps", "linear4": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_multi_gpu def test_dispatch_model_with_unused_submodules_multi_gpu(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: dispatch_model( model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] ) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_dispatch_model_force_hooks(self): model = ModelForTest() device_map = {"": 0} x = torch.randn(2, 3) expected = model(x) dispatch_model(model, device_map, force_hooks=True) output = model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_load_checkpoint_and_dispatch(self): model = ModelForTest() device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelForTest() new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) # CPU-offloaded weights are on the meta device while waiting for the forward pass. assert new_model.linear1.weight.device == torch.device("meta") assert new_model.linear2.weight.device == torch.device(0) output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_mps def test_load_checkpoint_and_dispatch_mps(self): model = ModelForTest() device_map = {"linear1": "mps", "batchnorm": "mps", "linear2": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, offload_folder=tmp_dir ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. 
assert new_model.linear1.weight.device == torch.device("mps:0") assert new_model.linear2.weight.device == torch.device("meta") output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_multi_gpu def test_load_checkpoint_and_dispatch_multi_gpu(self): model = BiggerModelForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = BiggerModelForTest() new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) # CPU-offloaded weights are on the meta device while waiting for the forward pass. assert new_model.linear1.weight.device == torch.device("meta") assert new_model.linear2.weight.device == torch.device("meta") assert new_model.linear3.weight.device == torch.device(0) assert new_model.linear4.weight.device == torch.device(1) output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_load_checkpoint_and_dispatch_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 0} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. assert new_model.linear1.linear.weight.device == torch.device("meta") assert new_model.linear2.linear.weight.device == torch.device("meta") assert new_model.linear3.linear.weight.device == torch.device(0) assert new_model.linear4.linear.weight.device == torch.device(0) output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_mps def test_load_checkpoint_and_dispatch_with_unused_submodules_mps(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "mps", "linear2": "mps", "batchnorm": "mps", "linear3": "disk", "linear4": "disk"} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"], offload_folder=tmp_dir, ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. 
assert new_model.linear1.linear.weight.device == torch.device("mps:0") assert new_model.linear2.linear.weight.device == torch.device("mps:0") assert new_model.linear3.linear.weight.device == torch.device("meta") assert new_model.linear4.linear.weight.device == torch.device("meta") output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_multi_gpu def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self): model = ModelWithUnusedSubModulesForTest() device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} x = torch.randn(2, 3) expected = model(x) with TemporaryDirectory() as tmp_dir: checkpoint = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), checkpoint) new_model = ModelWithUnusedSubModulesForTest() new_model = load_checkpoint_and_dispatch( new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] ) # CPU-offloaded weights are on the meta device while waiting for the forward pass. assert new_model.linear1.linear.weight.device == torch.device("meta") assert new_model.linear2.linear.weight.device == torch.device("meta") assert new_model.linear3.linear.weight.device == torch.device(0) assert new_model.linear4.linear.weight.device == torch.device(1) output = new_model(x) assert torch.allclose(expected, output.cpu(), atol=1e-5) @require_cuda def test_cpu_offload_with_hook(self): model1 = torch.nn.Linear(4, 5) model1, hook1 = cpu_offload_with_hook(model1) assert model1.weight.device == torch.device("cpu") inputs = torch.randn(3, 4) outputs = model1(inputs) assert outputs.device == torch.device(0) assert model1.weight.device == torch.device(0) hook1.offload() assert model1.weight.device == torch.device("cpu") model2 = torch.nn.Linear(5, 5) model2, hook2 = cpu_offload_with_hook(model2, prev_module_hook=hook1) assert model2.weight.device == torch.device("cpu") outputs = model1(inputs) assert outputs.device == torch.device(0) assert model1.weight.device == torch.device(0) outputs = model2(outputs) assert outputs.device == torch.device(0) assert model1.weight.device == torch.device("cpu") assert model2.weight.device == torch.device(0) hook2.offload() assert model2.weight.device == torch.device("cpu") @require_non_torch_xla @slow @require_bnb @require_multi_gpu def test_dispatch_model_bnb(self): """Tests that `dispatch_model` quantizes int8 layers""" from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="balanced", ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 assert model.h[(-1)].self_attention.query_key_value.weight.dtype == torch.int8 assert model.h[(-1)].self_attention.query_key_value.weight.device.index == 1 @require_non_torch_xla @slow @require_bnb def test_dispatch_model_int8_simple(self): """Tests that `dispatch_model` quantizes int8 layers""" from huggingface_hub import hf_hub_download 
from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") # test with auto model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="auto", ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with str device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": torch.device("cuda:0")}, ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with torch.device device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": "cuda:0"}, ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 @require_non_torch_xla @slow @require_bnb def test_dipatch_model_fp4_simple(self): """Tests that `dispatch_model` quantizes fp4 layers""" from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModel, BitsAndBytesConfig from transformers.utils.bitsandbytes import replace_with_bnb_linear with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) quantization_config = BitsAndBytesConfig(load_in_4bit=True) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin") # test with auto model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map="auto", ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with str device map model = load_checkpoint_and_dispatch( model, checkpoint=model_path, device_map={"": torch.device("cuda:0")}, ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0 with init_empty_weights(): model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m")) model = replace_with_bnb_linear( model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config ) # test with torch.device device map model = load_checkpoint_and_dispatch( model, 
checkpoint=model_path, device_map={"": "cuda:0"}, ) assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8 assert model.h[0].self_attention.query_key_value.weight.device.index == 0
accelerate/tests/test_big_modeling.py/0
{ "file_path": "accelerate/tests/test_big_modeling.py", "repo_id": "accelerate", "token_count": 19558 }
7
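The tests above exercise accelerate's big-modeling entry points (init_empty_weights, cpu_offload, disk_offload, dispatch_model, load_checkpoint_and_dispatch). As a reader aid, here is a minimal sketch, separate from the test file above, of the flow those tests cover: build a model skeleton on the meta device, then load a saved checkpoint with an explicit device_map so each layer is placed on (or offloaded from) its device. The TinyModel class and the checkpoint filename are illustrative assumptions, not part of the repository.

import os
import tempfile

import torch
import torch.nn as nn

from accelerate.big_modeling import init_empty_weights, load_checkpoint_and_dispatch


class TinyModel(nn.Module):
    # Illustrative stand-in for the ModelForTest used throughout the tests.
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.linear1(x))


with tempfile.TemporaryDirectory() as tmp_dir:
    checkpoint = os.path.join(tmp_dir, "pt_model.bin")
    torch.save(TinyModel().state_dict(), checkpoint)

    # Instantiate without allocating real storage (weights live on the meta device).
    with init_empty_weights():
        model = TinyModel()

    # Place linear2 on GPU 0 when available, keep linear1 handled on CPU.
    device_map = {"linear1": "cpu", "linear2": 0 if torch.cuda.is_available() else "cpu"}
    model = load_checkpoint_and_dispatch(model, checkpoint, device_map=device_map)

    # Inputs can stay on CPU; the attached hooks move tensors to the right device.
    _ = model(torch.randn(2, 3))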
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest import warnings from collections import OrderedDict import torch import torch.nn as nn from safetensors.torch import save_file from accelerate import init_empty_weights from accelerate.test_utils import require_cuda, require_huggingface_suite, require_multi_gpu from accelerate.utils.modeling import ( check_device_map, clean_device_map, compute_module_sizes, compute_module_total_buffer_size, convert_file_size_to_int, find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, ) class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class LinearWithNonPersistentBuffers(nn.Module): def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None: factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer("weight", torch.empty((out_features, in_features), **factory_kwargs)) if bias: self.register_buffer("bias", torch.empty(out_features, **factory_kwargs), persistent=False) else: self.register_buffer("bias", None) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.linear(input, self.weight, self.bias) class ModelSeveralDtypes(nn.Module): def __init__(self): super().__init__() self.register_buffer("int_param", torch.randint(high=10, size=(15, 30))) self.register_parameter("float_param", torch.nn.Parameter(torch.rand(10, 5))) def forward(self, x): return x + 2 def sequential_model(num_layers): layers = OrderedDict([(f"linear{i}", nn.Linear(1000, 1000)) for i in range(1, num_layers + 1)]) return nn.Sequential(layers) class ModelingUtilsTester(unittest.TestCase): def check_set_module_tensor_for_device(self, model, device1, device2): assert model.linear1.weight.device == torch.device(device1) with self.subTest("Access by submodule and direct name for a parameter"): set_module_tensor_to_device(model.linear1, "weight", device2) assert model.linear1.weight.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model.linear1, "weight", device1) set_module_tensor_to_device(model.linear1, "weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model.linear1, "weight", device1) assert model.linear1.weight.device == torch.device(device1) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "linear1.weight", device2) assert model.linear1.weight.device == torch.device(device2) if 
torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model, "linear1.weight", device1) set_module_tensor_to_device(model, "linear1.weight", device1, value=torch.randn(4, 3)) else: set_module_tensor_to_device(model, "linear1.weight", device1) assert model.linear1.weight.device == torch.device(device1) assert model.batchnorm.running_mean.device == torch.device(device1) with self.subTest("Access by submodule and direct name for a buffer"): set_module_tensor_to_device(model.batchnorm, "running_mean", device2) assert model.batchnorm.running_mean.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on device1 set_module_tensor_to_device(model.batchnorm, "running_mean", device1) set_module_tensor_to_device(model.batchnorm, "running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model.batchnorm, "running_mean", device1) assert model.batchnorm.running_mean.device == torch.device(device1) with self.subTest("Access by module and full name for a parameter"): set_module_tensor_to_device(model, "batchnorm.running_mean", device2) assert model.batchnorm.running_mean.device == torch.device(device2) if torch.device(device2) == torch.device("meta"): with self.assertRaises(ValueError): # We need a `value` to set the weight back on CPU set_module_tensor_to_device(model, "batchnorm.running_mean", device1) set_module_tensor_to_device(model, "batchnorm.running_mean", device1, value=torch.randn(4)) else: set_module_tensor_to_device(model, "batchnorm.running_mean", device1) assert model.batchnorm.running_mean.device == torch.device(device1) def test_set_module_tensor_to_meta_and_cpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", "meta") @require_cuda def test_set_module_tensor_to_cpu_and_gpu(self): model = ModelForTest() self.check_set_module_tensor_for_device(model, "cpu", 0) @require_cuda def test_set_module_tensor_to_meta_and_gpu(self): model = ModelForTest().to(0) self.check_set_module_tensor_for_device(model, 0, "meta") @require_multi_gpu def test_set_module_tensor_between_gpus(self): model = ModelForTest().to(0) self.check_set_module_tensor_for_device(model, 0, 1) def test_set_module_tensor_sets_dtype(self): model = ModelForTest() set_module_tensor_to_device(model, "linear1.weight", "cpu", value=model.linear1.weight, dtype=torch.float16) assert model.linear1.weight.dtype == torch.float16 def test_set_module_tensor_checks_shape(self): model = ModelForTest() tensor = torch.zeros((2, 2)) with self.assertRaises(ValueError) as cm: set_module_tensor_to_device(model, "linear1.weight", "cpu", value=tensor) assert ( str(cm.exception) == 'Trying to set a tensor of shape torch.Size([2, 2]) in "weight" (which has shape torch.Size([4, 3])), this look incorrect.' 
) def test_named_tensors(self): model = nn.BatchNorm1d(4) named_tensors = named_module_tensors(model) assert [name for name, _ in named_tensors] == [ "weight", "bias", "running_mean", "running_var", "num_batches_tracked", ] named_tensors = named_module_tensors(model, include_buffers=False) assert [name for name, _ in named_tensors] == ["weight", "bias"] model = ModelForTest() named_tensors = named_module_tensors(model) assert [name for name, _ in named_tensors] == [] named_tensors = named_module_tensors(model, recurse=True) assert [name for name, _ in named_tensors] == [ "linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias", "batchnorm.running_mean", "batchnorm.running_var", "batchnorm.num_batches_tracked", ] named_tensors = named_module_tensors(model, include_buffers=False, recurse=True) assert [name for name, _ in named_tensors] == [ "linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias", ] model = LinearWithNonPersistentBuffers(10, 10) named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=False) assert [name for name, _ in named_tensors] == ["weight", "bias"] named_tensors = named_module_tensors(model, include_buffers=True, remove_non_persistent=True) assert [name for name, _ in named_tensors] == ["weight"] def test_find_tied_parameters(self): model = sequential_model(4) assert find_tied_parameters(model) == [] model.linear2.weight = model.linear1.weight assert find_tied_parameters(model) == [["linear1.weight", "linear2.weight"]] model.linear4.weight = model.linear1.weight assert find_tied_parameters(model) == [["linear1.weight", "linear2.weight", "linear4.weight"]] model = sequential_model(5) model.linear1.weight = model.linear4.weight model.linear2.weight = model.linear3.weight model.linear5.weight = model.linear2.weight tied_params = sorted(find_tied_parameters(model), key=lambda x: len(x)) assert tied_params == [ ["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"], ] model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) model.block1.linear1.weight = model.block2.linear1.weight assert find_tied_parameters(model) == [["block1.linear1.weight", "block2.linear1.weight"]] def test_retie_parameters(self): model = sequential_model(2) retie_parameters(model, [["linear1.weight", "linear2.weight"]]) assert model.linear1.weight is model.linear2.weight model = sequential_model(3) retie_parameters(model, [["linear1.weight", "linear2.weight", "linear3.weight"]]) assert model.linear1.weight is model.linear2.weight assert model.linear1.weight is model.linear3.weight model = sequential_model(5) retie_parameters( model, [["linear1.weight", "linear4.weight"], ["linear2.weight", "linear3.weight", "linear5.weight"]] ) assert model.linear1.weight is model.linear4.weight assert model.linear2.weight is model.linear3.weight assert model.linear2.weight is model.linear5.weight model = nn.Sequential(OrderedDict([("block1", sequential_model(4)), ("block2", sequential_model(4))])) retie_parameters(model, [["block1.linear1.weight", "block2.linear1.weight"]]) assert model.block1.linear1.weight is model.block2.linear1.weight def test_compute_module_sizes(self): model = ModelForTest() expected_sizes = {"": 236, "linear1": 64, "linear1.weight": 48, "linear1.bias": 16} expected_sizes.update({"linear2": 100, "linear2.weight": 80, "linear2.bias": 20}) expected_sizes.update({"batchnorm": 72, 
"batchnorm.weight": 16, "batchnorm.bias": 16}) expected_sizes.update( {"batchnorm.running_mean": 16, "batchnorm.running_var": 16, "batchnorm.num_batches_tracked": 8} ) module_sizes = compute_module_sizes(model) assert module_sizes == expected_sizes model.half() expected_sizes = {k: s // 2 for k, s in expected_sizes.items()} # This one is not converted to half. expected_sizes["batchnorm.num_batches_tracked"] = 8 # This impacts batchnorm and total expected_sizes["batchnorm"] += 4 expected_sizes[""] += 4 module_sizes = compute_module_sizes(model) assert module_sizes == expected_sizes def test_compute_module_total_buffer_size(self): model = ModelForTest() model.linear1.register_buffer("test_buffer", torch.zeros(10, 10)) model.register_buffer("test_buffer2", torch.zeros(20, 10)) buffer_size = compute_module_total_buffer_size(model) assert buffer_size == 1240 model.half() buffer_size = compute_module_total_buffer_size(model) assert buffer_size == 624 def test_check_device_map(self): model = ModelForTest() check_device_map(model, {"": 0}) with self.assertRaises(ValueError): check_device_map(model, {"linear1": 0, "linear2": 1}) check_device_map(model, {"linear1": 0, "linear2": 1, "batchnorm": 1}) def shard_test_model(self, model, tmp_dir): module_index = { "linear1": "checkpoint_part1.bin", "batchnorm": "checkpoint_part2.bin", "linear2": "checkpoint_part3.bin", } index = {} for name, _ in model.state_dict().items(): module = name.split(".")[0] index[name] = module_index[module] with open(os.path.join(tmp_dir, "weight_map.index.json"), "w") as f: json.dump(index, f) for module, fname in module_index.items(): state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)} full_fname = os.path.join(tmp_dir, fname) torch.save(state_dict, full_fname) def test_load_checkpoint_in_model(self): # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir) @require_cuda def test_load_checkpoint_in_model_one_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Check with sharded checkpoint folder model = ModelForTest() with tempfile.TemporaryDirectory() as 
tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") @require_cuda def test_load_checkpoint_in_model_disk_offload(self): device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("meta") # Buffers are not offloaded by default assert model.batchnorm.running_mean.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") assert model.linear2.weight.device == torch.device("cpu") @require_multi_gpu def test_load_checkpoint_in_model_two_gpu(self): device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} # Check with whole checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: fname = os.path.join(tmp_dir, "pt_model.bin") torch.save(model.state_dict(), fname) load_checkpoint_in_model(model, fname, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(1) # Check with sharded index model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) index_file = os.path.join(tmp_dir, "weight_map.index.json") load_checkpoint_in_model(model, index_file, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(1) # Check with sharded checkpoint model = ModelForTest() with tempfile.TemporaryDirectory() as tmp_dir: self.shard_test_model(model, tmp_dir) load_checkpoint_in_model(model, tmp_dir, device_map=device_map) assert model.linear1.weight.device == torch.device(0) assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device(1) def test_load_checkpoint_in_model_dtype(self): with tempfile.NamedTemporaryFile(suffix=".pt") as tmpfile: model = ModelSeveralDtypes() torch.save(model.state_dict(), tmpfile.name) new_model = ModelSeveralDtypes() load_checkpoint_in_model( new_model, tmpfile.name, offload_state_dict=True, dtype=torch.float16, device_map={"": "cpu"} ) assert new_model.int_param.dtype == torch.int64 assert new_model.float_param.dtype == torch.float16 def test_clean_device_map(self): # Regroup everything if all is on the same device assert clean_device_map({"a": 0, "b": 0, "c": 0}) == {"": 0} # Regroups children of level 1 on the same device assert clean_device_map({"a.x": 0, "a.y": 0, "b.x": 1, "b.y": 1, "c": 1}) == {"a": 0, "b": 1, "c": 1} # Regroups children of 
level 2 on the same device assert clean_device_map({"a.x": 0, "a.y": 0, "b.x.0": 1, "b.x.1": 1, "b.y.0": 2, "b.y.1": 2, "c": 2}) == { "a": 0, "b.x": 1, "b.y": 2, "c": 2, } def test_infer_auto_device_map(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) # only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": 1} device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200}) # On device 1, we don't care about keeping size available for the max layer, so even if there is just the # size available for batchnorm + linear2, they fit here. assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": 1} model.linear1.weight = model.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) # By tying weights, the whole model fits on device 0 assert device_map == {"": 0} # When splitting a bigger model, the split is done at the layer level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500}) assert device_map == {"0": 0, "1.linear1": 0, "1.batchnorm": 0, "1.linear2": 1, "2": 1} # With no_split_module_classes, it's done at that module level model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) device_map = infer_auto_device_map( model, max_memory={0: 500, 1: 500}, no_split_module_classes=["ModelForTest"] ) assert device_map == {"0": 0, "1": 1, "2": 1} def test_infer_auto_device_map_with_tied_weights(self): model = nn.Sequential( OrderedDict([("layer1", ModelForTest()), ("layer2", ModelForTest()), ("layer3", ModelForTest())]) ) model.layer3.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = {"layer1": 0, "layer3.linear2": 0, "layer2": 1, "layer3.linear1": 1, "layer3.batchnorm": 1} assert device_map == expected # With three weights tied together model.layer2.linear2.weight = model.layer1.linear2.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.linear1": 1, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } assert device_map == expected # With two groups of weights tied together model.layer2.linear1.weight = model.layer1.linear1.weight device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) expected = { "layer1": 0, "layer2.linear1": 0, "layer2.linear2": 0, "layer3.linear2": 0, "layer2.batchnorm": 1, "layer3.linear1": 1, "layer3.batchnorm": 1, } assert device_map == expected # With weights ties in the same module model = nn.Sequential( OrderedDict( [ ("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(6, 6)), ("linear3", nn.Linear(4, 4)), ("linear4", nn.Linear(6, 6)), ] ) ) model.linear3.weight = model.linear1.weight model.linear3.bias = model.linear1.bias device_map = infer_auto_device_map(model, max_memory={0: 250, 1: 400}) expected = {"linear1": 0, "linear2": 1, "linear3": 0, "linear4": 1} assert device_map == expected # With tied weights sharing a same prefix name (`compute.weight` vs `compute.weight_submodule.parameter`) class SubModule(torch.nn.Module): def __init__(self, ref_to_parameter): super().__init__() self.parameter = ref_to_parameter def forward(self, x): return self.x + torch.max(self.parameter) class 
LinearModuleAndSubModule(torch.nn.Linear): def __init__(self, in_features, out_features): super().__init__(in_features, out_features) self.weight_submodule = SubModule(self.weight) def forward(self, x): return torch.nn.functional.linear(self.weight_submodule(x), self.weight) class Model(torch.nn.Module): def __init__(self): super().__init__() self.compute = LinearModuleAndSubModule(3, 8) def forward(self, x): return self.compute(x) model = Model() device_memory = {0: 4, "cpu": 96000} # Low memory device, just to force splitting and trigger the error infer_auto_device_map(model, device_memory) @require_huggingface_suite def test_infer_auto_device_map_on_t0pp(self): from transformers import AutoConfig, AutoModelForSeq2SeqLM config = AutoConfig.from_pretrained("bigscience/T0pp") with init_empty_weights(): model = AutoModelForSeq2SeqLM.from_config(config) model.tie_weights() special_dtypes = {n: torch.float32 for n, _ in model.named_parameters() if "wo" in n} max_memory = {0: 10**10, 1: 10**10, "cpu": 10**10} device_map = infer_auto_device_map( model, no_split_module_classes=["T5Block"], dtype=torch.float16, max_memory=max_memory, special_dtypes=special_dtypes, ) # The 3 tied weights should all be on device 0 assert device_map["shared"] == 0 assert device_map["encoder.embed_tokens"] == 0 assert device_map["decoder.embed_tokens"] == 0 def test_infer_auto_device_map_with_buffer_check(self): model = ModelForTest() model.linear1.register_buffer("test_buffer1", torch.zeros(10, 2)) model.batchnorm.register_buffer("test_buffer2", torch.zeros(10, 3)) model.linear2.register_buffer("test_buffer3", torch.zeros(10, 3)) # model has size 236(parameters) + 360(buffers): linear1 64 + 80, batchnorm 72 + 160, linear2 100 + 120 # Only linear1 (144) fits on device 0, and remaining buffers (batchnorm's 160 + linear2's 120 = 280) won't fit # device 0, because they will also be loaded to device 0 all at once when inferencing without offload_buffers # Should print a warning as intended in such case with self.assertWarns(Warning): device_map = infer_auto_device_map(model, max_memory={0: 400, "cpu": "1GB"}) assert device_map == {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} # Only linear1 (144) fits on device 0, and remaining buffers (batchnorm's 160 + linear2's 120 = 280) won't fit # device 0, but with offload_buffers they won't be loaded to device 0 all at once, so it's ok now # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 400, "cpu": "1GB"}, offload_buffers=True) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} def test_infer_auto_device_map_with_buffer_check_and_multi_devices(self): model = ModelForTest() model.linear1.register_buffer("test_buffer1", torch.zeros(10, 2)) model.batchnorm.register_buffer("test_buffer2", torch.zeros(10, 3)) model.linear2.register_buffer("test_buffer3", torch.zeros(10, 3)) model.linear3 = nn.Linear(4, 5) model.linear3.register_buffer("test_buffer4", torch.zeros(10, 2)) # model has size 336(parameters) + 440(buffers): linear1 64 + 80, batchnorm 72 + 160, linear2 100 + 120, # linear3 100 + 80 # Now we have two devices, linear1 will fit on device 0, batchnorm will fit on device 1, and the second device # can hold all remaining buffers # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 
400, 1: 400, "cpu": "1GB"}) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} # Now we have two devices, but neither the first nor the second device can hold all remaining buffers # Should print a warning as intended in such case with self.assertWarns(Warning): device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 200, "cpu": "1GB"}) assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} # Now we have two devices, neither can hold all the buffers, but we are using the offload_buffers=True # Should NOT print a warning in such case with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 200, "cpu": "1GB"}, offload_buffers=True) assert len(w) == 0 assert device_map == {"linear1": 0, "batchnorm": 1, "linear2": "cpu", "linear3": "cpu"} @require_cuda def test_get_balanced_memory(self): model = ModelForTest() # model has size 236: linear1 64, batchnorm 72, linear2 100 max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200}) assert {0: 200, 1: 200} == max_memory # We should be able to set models on a non-contiguous sub-set of max_memory = get_balanced_memory(model, max_memory={0: 200, 2: 200}) assert {0: 200, 2: 200} == max_memory max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300}) assert {0: 215, 1: 300} == max_memory # Last device always get max memory to give more buffer and avoid accidental CPU offload max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500}) assert {0: 215, 1: 500} == max_memory # Last device always get max memory to give more buffer, even if CPU is provided max_memory = get_balanced_memory(model, max_memory={0: 300, "cpu": 1000}) assert {0: 300, "cpu": 1000} == max_memory # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300}) assert {0: 0, 1: 215, 2: 300} == max_memory # If we set a device to 0, it's not counted. max_memory = get_balanced_memory(model, max_memory={0: 0, "cpu": 100}) assert {0: 0, "cpu": 100} == max_memory @require_cuda def test_load_state_dict(self): state_dict = {k: torch.randn(4, 5) for k in ["a", "b", "c"]} device_maps = [{"a": "cpu", "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": "disk"}, {"a": 0, "b": 0, "c": 0}] for device_map in device_maps: with tempfile.TemporaryDirectory() as tmp_dir: checkpoint_file = os.path.join(tmp_dir, "model.safetensors") save_file(state_dict, checkpoint_file, metadata={"format": "pt"}) loaded_state_dict = load_state_dict(checkpoint_file, device_map=device_map) for param, device in device_map.items(): device = device if device != "disk" else "cpu" assert loaded_state_dict[param].device == torch.device(device) def test_convert_file_size(self): result = convert_file_size_to_int("0MB") assert result == 0 result = convert_file_size_to_int("100MB") assert result == (100 * (10**6)) result = convert_file_size_to_int("2GiB") assert result == (2 * (2**30)) result = convert_file_size_to_int("512KiB") assert result == (512 * (2**10)) result = convert_file_size_to_int("1.5GB") assert result == (1.5 * (10**9)) result = convert_file_size_to_int("100KB") assert result == (100 * (10**3)) result = convert_file_size_to_int(500) assert result == 500 with self.assertRaises(ValueError): convert_file_size_to_int("5MBB") with self.assertRaises(ValueError): convert_file_size_to_int("5k0MB") with self.assertRaises(ValueError): convert_file_size_to_int("-1GB")
accelerate/tests/test_modeling_utils.py/0
{ "file_path": "accelerate/tests/test_modeling_utils.py", "repo_id": "accelerate", "token_count": 14449 }
8
# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.now(timezone.utc)
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
accelerate/utils/stale.py/0
{ "file_path": "accelerate/utils/stale.py", "repo_id": "accelerate", "token_count": 1013 }
9
# Model arguments
model_name_or_path: BramVanroy/gpt2-cpt-dutch
model_revision: main
torch_dtype: bfloat16

# Data training arguments
chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
dataset_mixer:
  BramVanroy/ultrachat_200k_dutch: 1.0
dataset_splits:
- train_sft
- test_sft
preprocessing_num_workers: 12

# SFT trainer config
bf16: true
do_eval: true
evaluation_strategy: epoch
gradient_accumulation_steps: 1
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: False
hub_model_id: gpt2-sft-dutch
hub_strategy: every_save
learning_rate: 2.0e-05
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 1024
max_steps: -1
num_train_epochs: 1
output_dir: data/gpt2-sft-dutch
overwrite_output_dir: true
per_device_eval_batch_size: 8
per_device_train_batch_size: 8
push_to_hub: true
remove_unused_columns: true
report_to:
- wandb
save_strategy: "steps"
save_steps: 100
save_total_limit: 1
seed: 42
warmup_ratio: 0.1
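To see what the `chat_template` above actually produces, here is a small illustrative sketch (not part of the recipe): it loads the YAML, renders the template with `jinja2` on a toy conversation, and prints the text the SFT run would train on. The `eos_token` value below is just a placeholder; the real one comes from the tokenizer.

```python
# Illustrative only: render the recipe's chat_template on a toy conversation.
# The eos_token here is a placeholder; in training it comes from the tokenizer.
import yaml
from jinja2 import Template

with open("recipes/gpt2-nl/sft/config_full.yaml") as f:
    config = yaml.safe_load(f)

template = Template(config["chat_template"])
rendered = template.render(
    messages=[
        {"role": "system", "content": "Je bent een behulpzame assistent."},
        {"role": "user", "content": "Hallo, wie ben jij?"},
    ],
    eos_token="<eos>",
    add_generation_prompt=True,
)
print(rendered)
```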
alignment-handbook/recipes/gpt2-nl/sft/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/gpt2-nl/sft/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 553 }
10
# Model arguments
model_name_or_path: google/gemma-7b
model_revision: main
tokenizer_name_or_path: philschmid/gemma-tokenizer-chatml # Custom tokenizer with <|im_start|> and <|im_end|> tokens
torch_dtype: bfloat16
use_flash_attention_2: true

# Data training arguments
dataset_mixer:
  HuggingFaceH4/deita-10k-v0-sft: 1.0
dataset_splits:
- train_sft
- test_sft
preprocessing_num_workers: 12

# SFT trainer config
bf16: true
dataset_kwargs:
  add_special_tokens: false  # We already wrap <bos> and <eos> in the chat template
  append_concat_token: false # No need to add <eos> across samples
do_eval: true
evaluation_strategy: epoch
gradient_accumulation_steps: 4
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
hub_model_id: zephyr-7b-gemma-sft
hub_strategy: every_save
learning_rate: 2.0e-05
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 2048
max_steps: -1
num_train_epochs: 3
output_dir: data/zephyr-7b-gemma-sft
overwrite_output_dir: true
per_device_eval_batch_size: 4
per_device_train_batch_size: 4
push_to_hub: true
remove_unused_columns: true
report_to:
- tensorboard
- wandb
save_strategy: "no"
seed: 42
warmup_ratio: 0.1
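One number worth spelling out from this recipe: the effective global batch size is per-device batch size times gradient accumulation steps times the number of GPUs. The GPU count below is an assumption for illustration; it is not stored in the YAML.

```python
# Effective global batch size implied by the recipe above.
# num_gpus is an assumption (e.g. a single 8-GPU node); it is not part of the config.
per_device_train_batch_size = 4
gradient_accumulation_steps = 4
num_gpus = 8

print(per_device_train_batch_size * gradient_accumulation_steps * num_gpus)  # 128
```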
alignment-handbook/recipes/zephyr-7b-gemma/sft/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-gemma/sft/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 480 }
11
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest

from alignment import DataArguments, H4ArgumentParser, ModelArguments, SFTConfig


class H4ArgumentParserTest(unittest.TestCase):
    def setUp(self):
        self.parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
        self.yaml_file_path = "tests/fixtures/config_sft_full.yaml"

    def test_load_yaml(self):
        model_args, data_args, training_args = self.parser.parse_yaml_file(os.path.abspath(self.yaml_file_path))
        self.assertEqual(model_args.model_name_or_path, "mistralai/Mistral-7B-v0.1")

    def test_load_yaml_and_args(self):
        command_line_args = [
            "--model_name_or_path=test",
            "--use_peft=true",
            "--lora_r=16",
            "--lora_dropout=0.5",
        ]
        model_args, data_args, training_args = self.parser.parse_yaml_and_args(
            os.path.abspath(self.yaml_file_path), command_line_args
        )
        self.assertEqual(model_args.model_name_or_path, "test")
        self.assertEqual(model_args.use_peft, True)
        self.assertEqual(model_args.lora_r, 16)
        self.assertEqual(model_args.lora_dropout, 0.5)
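Outside the test suite, the same parser can be used in a training script to combine a recipe YAML with command-line overrides. A minimal sketch (the YAML path and the override values are illustrative):

```python
# Illustrative sketch of H4ArgumentParser usage outside the tests.
import os

from alignment import DataArguments, H4ArgumentParser, ModelArguments, SFTConfig

parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
model_args, data_args, training_args = parser.parse_yaml_and_args(
    os.path.abspath("recipes/gpt2-nl/sft/config_full.yaml"),
    ["--learning_rate=1.0e-05", "--num_train_epochs=2"],
)
print(model_args.model_name_or_path, training_args.learning_rate)
```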
alignment-handbook/tests/test_configs.py/0
{ "file_path": "alignment-handbook/tests/test_configs.py", "repo_id": "alignment-handbook", "token_count": 697 }
12
# Hello world!

We will now create the hello world of the ML world: a model capable of solving the MNIST dataset.

Open `src/main.rs` and fill in this content:

```rust
# extern crate candle_core;
use candle_core::{Device, Result, Tensor};

struct Model {
    first: Tensor,
    second: Tensor,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = image.matmul(&self.first)?;
        let x = x.relu()?;
        x.matmul(&self.second)
    }
}

fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    let device = Device::Cpu;

    let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
    let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Everything should now run with:

```bash
cargo run --release
```

## Using a `Linear` layer.

Now that we have this, we might want to make things a bit more complex, for instance by adding a `bias` and creating the classical `Linear` layer. We can do so as follows:

```rust
# extern crate candle_core;
# use candle_core::{Device, Result, Tensor};
struct Linear{
    weight: Tensor,
    bias: Tensor,
}
impl Linear{
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x = x.matmul(&self.weight)?;
        x.broadcast_add(&self.bias)
    }
}

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}
```

This changes the code that runs the model into the following:

```rust
# extern crate candle_core;
# use candle_core::{Device, Result, Tensor};
# struct Linear{
#     weight: Tensor,
#     bias: Tensor,
# }
# impl Linear{
#     fn forward(&self, x: &Tensor) -> Result<Tensor> {
#         let x = x.matmul(&self.weight)?;
#         x.broadcast_add(&self.bias)
#     }
# }
#
# struct Model {
#     first: Linear,
#     second: Linear,
# }
#
# impl Model {
#     fn forward(&self, image: &Tensor) -> Result<Tensor> {
#         let x = self.first.forward(image)?;
#         let x = x.relu()?;
#         self.second.forward(&x)
#     }
# }
fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    // Use Device::Cpu; to use the CPU.
    let device = Device::cuda_if_available(0)?;

    // Creating a dummy model
    let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
    let first = Linear{weight, bias};
    let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let second = Linear{weight, bias};
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    // Inference on the model
    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Now that this works, it is a great way to create your own layers. But most of the classical layers are already implemented in [candle-nn](https://github.com/huggingface/candle/tree/main/candle-nn).

## Using `candle_nn`.

For instance [Linear](https://github.com/huggingface/candle/blob/main/candle-nn/src/linear.rs) is already there. This Linear is coded with PyTorch layout in mind, to make it easier to reuse existing models, so it uses the transpose of the weights rather than the weights directly.

So instead we can simplify our example. First add `candle-nn` as a dependency:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-nn
```

And rewrite our example using it:

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}

fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    let device = Device::Cpu;

    // This has changed (784, 100) -> (100, 784) !
    let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
    let first = Linear::new(weight, Some(bias));
    let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let second = Linear::new(weight, Some(bias));
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Feel free to modify this example to use `Conv2d` to create a classical convnet instead.

Now that we have the running dummy code we can get to more advanced topics:

- [For PyTorch users](../guide/cheatsheet.md)
- [Running existing models](../inference/inference.md)
- [Training models](../training/training.md)
candle/candle-book/src/guide/hello_world.md/0
{ "file_path": "candle/candle-book/src/guide/hello_world.md", "repo_id": "candle", "token_count": 2069 }
13
# candle

Minimalist ML framework for Rust
candle/candle-core/README.md/0
{ "file_path": "candle/candle-core/README.md", "repo_id": "candle", "token_count": 11 }
14
use super::{Cpu, CpuF16}; #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; use half::f16; pub struct CurrentCpu {} const STEP: usize = 32; const EPR: usize = 8; const ARR: usize = STEP / EPR; impl Cpu<ARR> for CurrentCpu { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } unsafe fn load(mem_addr: *const f32) -> Self::Unit { _mm256_loadu_ps(mem_addr) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { _mm256_storeu_ps(mem_addr, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = _mm256_add_ps(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = _mm256_add_ps(x[4 * i], x[4 * i + 2]); } #[allow(clippy::reversed_empty_ranges)] for i in 0..ARR / 8 { x[8 * i] = _mm256_add_ps(x[8 * i], x[8 * i + 4]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } } pub struct CurrentCpuF16 {} impl CpuF16<ARR> for CurrentCpuF16 { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } #[cfg(target_feature = "f16c")] unsafe fn load(mem_addr: *const f16) -> Self::Unit { _mm256_cvtph_ps(_mm_loadu_si128(mem_addr as *const __m128i)) } #[cfg(not(target_feature = "f16c"))] unsafe fn load(mem_addr: *const f16) -> Self::Unit { let mut tmp = [0.0f32; 8]; for i in 0..8 { tmp[i] = (*mem_addr.add(i)).to_f32(); } _mm256_loadu_ps(tmp.as_ptr()) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } #[cfg(target_feature = "f16c")] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { _mm_storeu_si128(mem_addr as *mut __m128i, _mm256_cvtps_ph(a, 0)) } #[cfg(not(target_feature = "f16c"))] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { let mut tmp = [0.0f32; 8]; _mm256_storeu_ps(tmp.as_mut_ptr(), a); for i in 0..8 { *mem_addr.add(i) = f16::from_f32(tmp[i]); } } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { let mut offset = ARR >> 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } }
candle/candle-core/src/cpu/avx.rs/0
{ "file_path": "candle/candle-core/src/cpu/avx.rs", "repo_id": "candle", "token_count": 2094 }
15
use crate::{Error, Result, Shape}; #[derive(Debug, PartialEq, Eq, Clone)] pub struct Layout { shape: Shape, // The strides are given in number of elements and not in bytes. stride: Vec<usize>, start_offset: usize, } impl Layout { pub fn new(shape: Shape, stride: Vec<usize>, start_offset: usize) -> Self { Self { shape, stride, start_offset, } } pub fn contiguous_with_offset<S: Into<Shape>>(shape: S, start_offset: usize) -> Self { let shape = shape.into(); let stride = shape.stride_contiguous(); Self { shape, stride, start_offset, } } pub fn contiguous<S: Into<Shape>>(shape: S) -> Self { Self::contiguous_with_offset(shape, 0) } pub fn dims(&self) -> &[usize] { self.shape.dims() } pub fn shape(&self) -> &Shape { &self.shape } pub fn stride(&self) -> &[usize] { &self.stride } pub fn start_offset(&self) -> usize { self.start_offset } /// Returns the appropriate start and stop offset if the data is stored in a C /// contiguous (aka row major) way. pub fn contiguous_offsets(&self) -> Option<(usize, usize)> { if self.is_contiguous() { let start_o = self.start_offset; Some((start_o, start_o + self.shape.elem_count())) } else { None } } /// Returns true if the data is stored in a C contiguous (aka row major) way. /// Note that this does not implies that the start offset is 0 or that there are no extra /// elements at the end of the storage. pub fn is_contiguous(&self) -> bool { self.shape.is_contiguous(&self.stride) } /// Returns true if the data is stored in a Fortran contiguous (aka column major) way. pub fn is_fortran_contiguous(&self) -> bool { self.shape.is_fortran_contiguous(&self.stride) } pub fn narrow(&self, dim: usize, start: usize, len: usize) -> Result<Self> { let dims = self.shape().dims(); if dim >= dims.len() { Err(Error::DimOutOfRange { shape: self.shape().clone(), dim: dim as i32, op: "narrow", } .bt())? } if start + len > dims[dim] { Err(Error::NarrowInvalidArgs { shape: self.shape.clone(), dim, start, len, msg: "start + len > dim_len", } .bt())? } let mut dims = dims.to_vec(); dims[dim] = len; Ok(Self { shape: Shape::from(dims), stride: self.stride.clone(), start_offset: self.start_offset + self.stride[dim] * start, }) } pub fn transpose(&self, dim1: usize, dim2: usize) -> Result<Self> { let rank = self.shape.rank(); if rank <= dim1 || rank <= dim2 { Err(Error::UnexpectedNumberOfDims { expected: usize::max(dim1, dim2), got: rank, shape: self.shape().clone(), } .bt())? 
} let mut stride = self.stride().to_vec(); let mut dims = self.shape().dims().to_vec(); dims.swap(dim1, dim2); stride.swap(dim1, dim2); Ok(Self { shape: Shape::from(dims), stride, start_offset: self.start_offset, }) } pub fn permute(&self, idxs: &[usize]) -> Result<Self> { let is_permutation = idxs.len() == self.shape.rank() && (0..idxs.len()).all(|i| idxs.contains(&i)); if !is_permutation { crate::bail!( "dimension mismatch in permute, tensor {:?}, dims: {:?}", self.dims(), idxs ) } let stride = self.stride(); let dims = self.shape().dims(); let mut perm_stride = stride.to_vec(); let mut perm_dims = dims.to_vec(); for (i, &idx) in idxs.iter().enumerate() { perm_stride[i] = stride[idx]; perm_dims[i] = dims[idx]; } Ok(Self { shape: Shape::from(perm_dims), stride: perm_stride, start_offset: self.start_offset, }) } pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> { let shape = shape.into(); if shape.rank() < self.shape().rank() { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } let added_dims = shape.rank() - self.shape().rank(); let mut stride = vec![0; added_dims]; for (&dst_dim, (&src_dim, &src_stride)) in shape.dims()[added_dims..] .iter() .zip(self.dims().iter().zip(self.stride())) { let s = if dst_dim == src_dim { src_stride } else if src_dim != 1 { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } else { 0 }; stride.push(s) } Ok(Self { shape, stride, start_offset: self.start_offset, }) } pub(crate) fn strided_index(&self) -> crate::StridedIndex { crate::StridedIndex::from_layout(self) } pub(crate) fn strided_blocks(&self) -> crate::StridedBlocks { let mut block_len = 1; let mut contiguous_dims = 0; // These are counted from the right. for (&stride, &dim) in self.stride().iter().zip(self.dims().iter()).rev() { if stride != block_len { break; } block_len *= dim; contiguous_dims += 1; } let index_dims = self.dims().len() - contiguous_dims; if index_dims == 0 { crate::StridedBlocks::SingleBlock { start_offset: self.start_offset, len: block_len, } } else { let block_start_index = crate::StridedIndex::new( &self.dims()[..index_dims], &self.stride[..index_dims], self.start_offset, ); crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } } } // Returns the contiguous offsets with broadcast if applicable. 
pub(crate) fn offsets_b(&self) -> Option<ContiguousOffsetsWithBroadcast> { let mut left_broadcast = 1; let mut right_broadcast = 1; let strides = self.stride(); let dims = self.dims(); let mut start_cont = 0; let mut end_cont = dims.len(); for (&s, &d) in strides.iter().zip(dims.iter()) { if s != 0 { break; } start_cont += 1; left_broadcast *= d; } if start_cont == dims.len() { return Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len: 1, left_broadcast, right_broadcast: 1, }); } for (&s, &d) in strides.iter().zip(dims.iter()).rev() { if s != 0 { break; } end_cont -= 1; right_broadcast *= d; } // Check that the inner dims are contiguous let strides = &strides[start_cont..end_cont]; let dims = &dims[start_cont..end_cont]; let mut len = 1; for (&stride, &dim) in strides.iter().zip(dims.iter()).rev() { if stride != len { return None; } len *= dim; } Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len, left_broadcast, right_broadcast, }) } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ContiguousOffsetsWithBroadcast { pub start: usize, pub len: usize, pub left_broadcast: usize, pub right_broadcast: usize, }
candle/candle-core/src/layout.rs/0
{ "file_path": "candle/candle-core/src/layout.rs", "repo_id": "candle", "token_count": 4349 }
16
use super::k_quants::{ BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K, }; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; #[allow(unused_imports)] #[cfg(target_arch = "arm")] use core::arch::arm::*; #[allow(unused_imports)] #[cfg(target_arch = "aarch64")] use core::arch::aarch64::*; #[inline(always)] unsafe fn vdotq_s32(a: int8x16_t, b: int8x16_t) -> int32x4_t { // TODO: dotprod let p0 = vmull_s8(vget_low_s8(a), vget_low_s8(b)); let p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)) } #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; let nb = n / qk; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let m4b = vdupq_n_u8(0x0F); let s8b = vdupq_n_s8(0x8); let v0_0 = vld1q_u8(x0.qs.as_ptr()); // 4-bit -> 8-bit let v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); let v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); // sub 8 let v0_0ls = vsubq_s8(v0_0l, s8b); let v0_0hs = vsubq_s8(v0_0h, s8b); // load y let v1_0l = vld1q_s8(y0.qs.as_ptr()); let v1_0h = vld1q_s8(y0.qs.as_ptr().add(16)); let pl0 = vdotq_s32(v0_0ls, v1_0l); let ph0 = vdotq_s32(v0_0hs, v1_0h); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } let nb = n / QK8_0; unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let x0_0 = vld1q_s8(x0.qs.as_ptr()); let x0_1 = vld1q_s8(x0.qs.as_ptr().add(16)); // load y let y0_0 = vld1q_s8(y0.qs.as_ptr()); let y0_1 = vld1q_s8(y0.qs.as_ptr().add(16)); let p0 = vdotq_s32(x0_0, y0_0); let p1 = vdotq_s32(x0_1, y0_1); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { unsafe { let mut sum_i = vdupq_n_s32(0); let scale = xs.d * ys.d; let xs = xs.qs.as_ptr(); let ys = ys.qs.as_ptr(); for i in (0..QK_K).step_by(16) { let xs = vld1q_s8(xs.add(i)); let ys = vld1q_s8(ys.add(i)); let xy = vdotq_s32(xs, ys); sum_i = vaddq_s32(sum_i, xy) } sumf += vaddvq_s32(sum_i) as f32 * scale } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut sum = 0f32; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(3); for (x, y) in xs.iter().zip(ys.iter()) { let d_all = x.d.to_f32(); let mut q6 = x.ql.as_ptr(); let mut qh = x.qh.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut scale = x.scales.as_ptr(); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let scales = vld1q_s8(scale); let q6scales = int16x8x2_t( vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales)), ); let prod = vaddq_s32( vaddq_s32( vmull_s16(vget_low_s16(q8sums.0), vget_low_s16(q6scales.0)), 
vmull_s16(vget_high_s16(q8sums.0), vget_high_s16(q6scales.0)), ), vaddq_s32( vmull_s16(vget_low_s16(q8sums.1), vget_low_s16(q6scales.1)), vmull_s16(vget_high_s16(q8sums.1), vget_high_s16(q6scales.1)), ), ); let isum_mins = vaddvq_s32(prod); let mut isum = 0i32; for _j in 0..QK_K / 128 { let qhbits = vld1q_u8_x2(qh); qh = qh.add(32); let q6bits = vld1q_u8_x4(q6); q6 = q6.add(64); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q6h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let shifted = vshrq_n_u8(qhbits.0, 2); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 2); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.0, m4b), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.1, m4b), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.2, m4b), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.3, m4b), q6h_3)); let p0 = vdotq_s32(q6bytes_0, q8bytes.0); let p1 = vdotq_s32(q6bytes_1, q8bytes.1); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1; scale = scale.add(2); let p2 = vdotq_s32(q6bytes_2, q8bytes.2); let p3 = vdotq_s32(q6bytes_3, q8bytes.3); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1; scale = scale.add(2); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let shifted = vshrq_n_u8(qhbits.0, 4); let q6h_0 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.0, 6); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 6); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.0, 4), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.1, 4), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.2, 4), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.3, 4), q6h_3)); let p0 = vdotq_s32(q6bytes_0, q8bytes.0); let p1 = vdotq_s32(q6bytes_1, q8bytes.1); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1; scale = scale.add(2); let p2 = vdotq_s32(q6bytes_2, q8bytes.2); let p3 = vdotq_s32(q6bytes_3, q8bytes.3); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1; scale = scale.add(2); } sum += d_all * y.d * ((isum - 32 * isum_mins) as f32); } } Ok(sum) } #[inline(always)] pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(1); let mtwo = vdupq_n_u8(2); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | 
(((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mins8 = vld1_u8((utmp.as_ptr() as *const u8).add(8)); let mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); let sumi_mins = vaddvq_s32(prod); let mut scales = utmp.as_ptr() as *const u8; let mut q5 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(x.qh.as_ptr()); let mut sumi = 0i32; for _j in 0..QK_K / 64 { let q5bits = vld1q_u8_x2(q5); q5 = q5.add(32); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q5h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q5h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let q5h_2 = vshlq_n_u8(vandq_u8(mtwo, qhbits.0), 3); let q5h_3 = vshlq_n_u8(vandq_u8(mtwo, qhbits.1), 3); qhbits.0 = vshrq_n_u8(qhbits.0, 2); qhbits.1 = vshrq_n_u8(qhbits.1, 2); let q5bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.0, m4b), q5h_0)); let q5bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.1, m4b), q5h_1)); let q5bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.0, 4), q5h_2)); let q5bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.1, 4), q5h_3)); let p0 = vdotq_s32(q5bytes_0, q8bytes.0); let p1 = vdotq_s32(q5bytes_1, q8bytes.1); sumi += vaddvq_s32(vaddq_s32(p0, p1)) * *scales as i32; scales = scales.add(1); let p2 = vdotq_s32(q5bytes_2, q8bytes.2); let p3 = vdotq_s32(q5bytes_3, q8bytes.3); sumi += vaddvq_s32(vaddq_s32(p2, p3)) * *scales as i32; scales = scales.add(1); } sumf += d * sumi as f32 - dmin * sumi_mins as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut scales = [0u8; 16]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); let mins8 = vld1_u32( [ utmp[1] & KMASK1, ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4), ] .as_ptr(), ); utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[0] &= KMASK1; let mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); sumf -= dmin * vaddvq_s32(prod) as f32; LittleEndian::write_u32_into(&utmp, &mut scales); let mut q4 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut sumi1 = 0i32; let mut sumi2 = 0i32; for j in 0..QK_K / 64 { let q4bits = vld1q_u8_x2(q4); q4 = q4.add(32); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q4bits.0, m4b)), vreinterpretq_s8_u8(vandq_u8(q4bits.1, m4b)), ); let p0 = vdotq_s32(q4bytes.0, q8bytes.0); let p1 = vdotq_s32(q4bytes.1, q8bytes.1); sumi1 += vaddvq_s32(vaddq_s32(p0, p1)) * scales[2 * j] as i32; let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vshrq_n_u8(q4bits.0, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4bits.1, 4)), ); let p2 = vdotq_s32(q4bytes.0, q8bytes.0); let p3 = vdotq_s32(q4bytes.1, q8bytes.1); sumi2 += 
vaddvq_s32(vaddq_s32(p2, p3)) * scales[2 * j + 1] as i32; } sumf += d * (sumi1 + sumi2) as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut aux = [0u32; 3]; const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; unsafe { let m3b = vdupq_n_u8(0x3); let m0 = vdupq_n_u8(1); let m1 = vshlq_n_u8(m0, 1); let m2 = vshlq_n_u8(m0, 2); let m3 = vshlq_n_u8(m0, 3); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q3 = x.qs.as_ptr(); let qh = x.hmask.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(qh); let mut isum = 0i32; // Set up scales LittleEndian::read_u32_into(&x.scales, &mut aux); utmp[3] = ((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4); utmp[2] = ((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4); utmp[1] = (aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4); utmp[0] = (aux[0] & KMASK2) | ((aux[2] & KMASK1) << 4); let mut scale = utmp.as_mut_ptr() as *mut i8; for j in 0..16 { *scale.add(j) -= 32i8 } for j in 0..QK_K / 128 { let q3bits = vld1q_u8_x2(q3); q3 = q3.add(32); let q8bytes_1 = vld1q_s8_x4(q8); q8 = q8.add(64); let q8bytes_2 = vld1q_s8_x4(q8); q8 = q8.add(64); let q3h_0 = vshlq_n_u8(vbicq_u8(m0, qhbits.0), 2); let q3h_1 = vshlq_n_u8(vbicq_u8(m0, qhbits.1), 2); let q3h_2 = vshlq_n_u8(vbicq_u8(m1, qhbits.0), 1); let q3h_3 = vshlq_n_u8(vbicq_u8(m1, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.0, m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.1, m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 2), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 2), m3b)), vreinterpretq_s8_u8(q3h_3), ); let p0 = vdotq_s32(q3bytes_0, q8bytes_1.0); let p1 = vdotq_s32(q3bytes_1, q8bytes_1.1); let p2 = vdotq_s32(q3bytes_2, q8bytes_1.2); let p3 = vdotq_s32(q3bytes_3, q8bytes_1.3); isum += vaddvq_s32(p0) * *scale as i32 + vaddvq_s32(p1) * *scale.add(1) as i32 + vaddvq_s32(p2) * *scale.add(2) as i32 + vaddvq_s32(p3) * *scale.add(3) as i32; scale = scale.add(4); let q3h_0 = vbicq_u8(m2, qhbits.0); let q3h_1 = vbicq_u8(m2, qhbits.1); let q3h_2 = vshrq_n_u8(vbicq_u8(m3, qhbits.0), 1); let q3h_3 = vshrq_n_u8(vbicq_u8(m3, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 4), m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 4), m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 6), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 6), m3b)), vreinterpretq_s8_u8(q3h_3), ); let p0 = vdotq_s32(q3bytes_0, q8bytes_2.0); let p1 = vdotq_s32(q3bytes_1, q8bytes_2.1); let p2 = vdotq_s32(q3bytes_2, q8bytes_2.2); let p3 = vdotq_s32(q3bytes_3, q8bytes_2.3); isum += vaddvq_s32(p0) * *scale as i32 + vaddvq_s32(p1) * *scale.add(1) as i32 + vaddvq_s32(p2) * *scale.add(2) as i32 + vaddvq_s32(p3) * *scale.add(3) as i32; scale = scale.add(4); if j == 0 { qhbits.0 = vshrq_n_u8(qhbits.0, 4); qhbits.1 = vshrq_n_u8(qhbits.1, 4); } } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn 
vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut aux = [0u8; 16]; unsafe { let m3 = vdupq_n_u8(0x3); let m4 = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); let mut q2 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let sc = x.scales.as_ptr(); let mins_and_scales = vld1q_u8(sc); let scales = vandq_u8(mins_and_scales, m4); vst1q_u8(aux.as_mut_ptr(), scales); let mins = vshrq_n_u8(mins_and_scales, 4); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let mins16 = int16x8x2_t( vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins))), ); let s0 = vaddq_s32( vmull_s16(vget_low_s16(mins16.0), vget_low_s16(q8sums.0)), vmull_s16(vget_high_s16(mins16.0), vget_high_s16(q8sums.0)), ); let s1 = vaddq_s32( vmull_s16(vget_low_s16(mins16.1), vget_low_s16(q8sums.1)), vmull_s16(vget_high_s16(mins16.1), vget_high_s16(q8sums.1)), ); sumf += dmin * vaddvq_s32(vaddq_s32(s0, s1)) as f32; let mut isum = 0i32; let mut is = 0usize; // TODO: dotprod for _j in 0..QK_K / 128 { let q2bits = vld1q_u8_x2(q2); q2 = q2.add(32); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let mut q2bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q2bits.0, m3)), vreinterpretq_s8_u8(vandq_u8(q2bits.1, m3)), ); isum += multiply_accum_with_scale(&aux, is, 0, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 2), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 2), m3)); isum += multiply_accum_with_scale(&aux, is, 2, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 4), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 4), m3)); isum += multiply_accum_with_scale(&aux, is, 4, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 6), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 6), m3)); isum += multiply_accum_with_scale(&aux, is, 6, q2bytes, q8bytes); is += 8; } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] unsafe fn multiply_accum_with_scale( aux: &[u8; 16], is: usize, index: usize, q2bytes: int8x16x2_t, q8bytes: int8x16x2_t, ) -> i32 { let p1 = vdotq_s32(q2bytes.0, q8bytes.0); let p2 = vdotq_s32(q2bytes.1, q8bytes.1); vaddvq_s32(p1) * aux[is + index] as i32 + vaddvq_s32(p2) * aux[is + 1 + index] as i32 }
candle/candle-core/src/quantized/neon.rs/0
{ "file_path": "candle/candle-core/src/quantized/neon.rs", "repo_id": "candle", "token_count": 15290 }
17
# candle-datasets
candle/candle-datasets/README.md/0
{ "file_path": "candle/candle-datasets/README.md", "repo_id": "candle", "token_count": 7 }
18
# candle-blip

The [blip-image-captioning](https://huggingface.co/Salesforce/blip-image-captioning-base) model can generate captions for an input image.

## Running on an example

```bash
cargo run --example blip --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg
```

```
Running on CPU, to run on GPU, build this example with `--features cuda`
loaded image Tensor[dims 3, 384, 384; f32]
model built
several cyclists are riding down a road with cars behind them%
```

![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)
candle/candle-examples/examples/blip/README.md/0
{ "file_path": "candle/candle-examples/examples/blip/README.md", "repo_id": "candle", "token_count": 190 }
19
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::efficientvit; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { M0, M1, M2, M3, M4, M5, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::M0 => "m0", Self::M1 => "m1", Self::M2 => "m2", Self::M3 => "m3", Self::M4 => "m4", Self::M5 => "m5", }; format!("timm/efficientvit_{}.r224_in1k", name) } fn config(&self) -> efficientvit::Config { match self { Self::M0 => efficientvit::Config::m0(), Self::M1 => efficientvit::Config::m1(), Self::M2 => efficientvit::Config::m2(), Self::M3 => efficientvit::Config::m3(), Self::M4 => efficientvit::Config::m4(), Self::M5 => efficientvit::Config::m5(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::M0)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = efficientvit::efficientvit(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/efficientvit/main.rs/0
{ "file_path": "candle/candle-examples/examples/efficientvit/main.rs", "repo_id": "candle", "token_count": 1278 }
20
# candle-mamba-minimal: minimal implementation of Mamba

This is based on [mamba-minimal](https://github.com/johnma2006/mamba-minimal).

Compared to the mamba example, this version can handle training but is much slower.

## Running the example

```bash
$ cargo run --example mamba-minimal --release -- --prompt "Mamba is the"
Mamba is the most popular and best-selling game in the world. It has been downloaded more than 1,000 times by over 1 million people worldwide since its release on March 18th 2016.
The Mamba series of games are a collection that combines elements from all genres including action, adventure, strategy & puzzle games with some unique gameplay features such as stealth and survival.
The game is also known for its innovative graphics and the ability to play in a variety of different modes like single player or multiplayer.
```
candle/candle-examples/examples/mamba-minimal/README.md/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/README.md", "repo_id": "candle", "token_count": 206 }
21
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::mobileone; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { S0, S1, S2, S3, S4, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::S0 => "s0", Self::S1 => "s1", Self::S2 => "s2", Self::S3 => "s3", Self::S4 => "s4", }; format!("timm/mobileone_{}.apple_in1k", name) } fn config(&self) -> mobileone::Config { match self { Self::S0 => mobileone::Config::s0(), Self::S1 => mobileone::Config::s1(), Self::S2 => mobileone::Config::s2(), Self::S3 => mobileone::Config::s3(), Self::S4 => mobileone::Config::s4(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::S0)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = mobileone::mobileone(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/mobileone/main.rs/0
{ "file_path": "candle/candle-examples/examples/mobileone/main.rs", "repo_id": "candle", "token_count": 1213 }
22
use std::collections::VecDeque; use std::fmt::Display; use candle::{DType, Device, Error, Module, Result, Tensor, Var}; use candle_nn::{ func, linear, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, Sequential, VarBuilder, VarMap, }; use rand::{distributions::Uniform, thread_rng, Rng}; use super::gym_env::GymEnv; pub struct OuNoise { mu: f64, theta: f64, sigma: f64, state: Tensor, } impl OuNoise { pub fn new(mu: f64, theta: f64, sigma: f64, size_action: usize) -> Result<Self> { Ok(Self { mu, theta, sigma, state: Tensor::ones(size_action, DType::F32, &Device::Cpu)?, }) } pub fn sample(&mut self) -> Result<Tensor> { let rand = Tensor::randn_like(&self.state, 0.0, 1.0)?; let dx = ((self.theta * (self.mu - &self.state)?)? + (self.sigma * rand)?)?; self.state = (&self.state + dx)?; Ok(self.state.clone()) } } #[derive(Clone)] struct Transition { state: Tensor, action: Tensor, reward: Tensor, next_state: Tensor, terminated: bool, truncated: bool, } impl Transition { fn new( state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) -> Self { Self { state: state.clone(), action: action.clone(), reward: reward.clone(), next_state: next_state.clone(), terminated, truncated, } } } pub struct ReplayBuffer { buffer: VecDeque<Transition>, capacity: usize, size: usize, } impl ReplayBuffer { pub fn new(capacity: usize) -> Self { Self { buffer: VecDeque::with_capacity(capacity), capacity, size: 0, } } pub fn push( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { if self.size == self.capacity { self.buffer.pop_front(); } else { self.size += 1; } self.buffer.push_back(Transition::new( state, action, reward, next_state, terminated, truncated, )); } #[allow(clippy::type_complexity)] pub fn random_batch( &self, batch_size: usize, ) -> Result<Option<(Tensor, Tensor, Tensor, Tensor, Vec<bool>, Vec<bool>)>> { if self.size < batch_size { Ok(None) } else { let transitions: Vec<&Transition> = thread_rng() .sample_iter(Uniform::from(0..self.size)) .take(batch_size) .map(|i| self.buffer.get(i).unwrap()) .collect(); let states: Vec<Tensor> = transitions .iter() .map(|t| t.state.unsqueeze(0)) .collect::<Result<_>>()?; let actions: Vec<Tensor> = transitions .iter() .map(|t| t.action.unsqueeze(0)) .collect::<Result<_>>()?; let rewards: Vec<Tensor> = transitions .iter() .map(|t| t.reward.unsqueeze(0)) .collect::<Result<_>>()?; let next_states: Vec<Tensor> = transitions .iter() .map(|t| t.next_state.unsqueeze(0)) .collect::<Result<_>>()?; let terminateds: Vec<bool> = transitions.iter().map(|t| t.terminated).collect(); let truncateds: Vec<bool> = transitions.iter().map(|t| t.truncated).collect(); Ok(Some(( Tensor::cat(&states, 0)?, Tensor::cat(&actions, 0)?, Tensor::cat(&rewards, 0)?, Tensor::cat(&next_states, 0)?, terminateds, truncateds, ))) } } } fn track( varmap: &mut VarMap, vb: &VarBuilder, target_prefix: &str, network_prefix: &str, dims: &[(usize, usize)], tau: f64, ) -> Result<()> { for (i, &(in_dim, out_dim)) in dims.iter().enumerate() { let target_w = vb.get((out_dim, in_dim), &format!("{target_prefix}-fc{i}.weight"))?; let network_w = vb.get((out_dim, in_dim), &format!("{network_prefix}-fc{i}.weight"))?; varmap.set_one( format!("{target_prefix}-fc{i}.weight"), ((tau * network_w)? 
+ ((1.0 - tau) * target_w)?)?, )?; let target_b = vb.get(out_dim, &format!("{target_prefix}-fc{i}.bias"))?; let network_b = vb.get(out_dim, &format!("{network_prefix}-fc{i}.bias"))?; varmap.set_one( format!("{target_prefix}-fc{i}.bias"), ((tau * network_b)? + ((1.0 - tau) * target_b)?)?, )?; } Ok(()) } struct Actor<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Actor<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims = vec![(size_state, 400), (400, 300), (300, size_action)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) .add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?) .add(func(|xs| xs.tanh())); Ok::<Sequential, Error>(seq) }; let network = make_network("actor")?; let target_network = make_network("target-actor")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0); Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor) -> Result<Tensor> { self.network.forward(state) } fn target_forward(&self, state: &Tensor) -> Result<Tensor> { self.target_network.forward(state) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-actor", "actor", &self.dims, tau, ) } } struct Critic<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Critic<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims: Vec<(usize, usize)> = vec![(size_state + size_action, 400), (400, 300), (300, 1)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) 
.add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?); Ok::<Sequential, Error>(seq) }; let network = make_network("critic")?; let target_network = make_network("target-critic")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0); Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.network.forward(&xs) } fn target_forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.target_network.forward(&xs) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-critic", "critic", &self.dims, tau, ) } } #[allow(clippy::upper_case_acronyms)] pub struct DDPG<'a> { actor: Actor<'a>, actor_optim: AdamW, critic: Critic<'a>, critic_optim: AdamW, gamma: f64, tau: f64, replay_buffer: ReplayBuffer, ou_noise: OuNoise, size_state: usize, size_action: usize, pub train: bool, } impl DDPG<'_> { #[allow(clippy::too_many_arguments)] pub fn new( device: &Device, size_state: usize, size_action: usize, train: bool, actor_lr: f64, critic_lr: f64, gamma: f64, tau: f64, buffer_capacity: usize, ou_noise: OuNoise, ) -> Result<Self> { let filter_by_prefix = |varmap: &VarMap, prefix: &str| { varmap .data() .lock() .unwrap() .iter() .filter_map(|(name, var)| name.starts_with(prefix).then_some(var.clone())) .collect::<Vec<Var>>() }; let actor = Actor::new(device, DType::F32, size_state, size_action)?; let actor_optim = AdamW::new( filter_by_prefix(&actor.varmap, "actor"), ParamsAdamW { lr: actor_lr, ..Default::default() }, )?; let critic = Critic::new(device, DType::F32, size_state, size_action)?; let critic_optim = AdamW::new( filter_by_prefix(&critic.varmap, "critic"), ParamsAdamW { lr: critic_lr, ..Default::default() }, )?; Ok(Self { actor, actor_optim, critic, critic_optim, gamma, tau, replay_buffer: ReplayBuffer::new(buffer_capacity), ou_noise, size_state, size_action, train, }) } pub fn remember( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { self.replay_buffer .push(state, action, reward, next_state, terminated, truncated) } pub fn actions(&mut self, state: &Tensor) -> Result<f32> { let actions = self .actor .forward(&state.detach().unsqueeze(0)?)? .squeeze(0)?; let actions = if self.train { (actions + self.ou_noise.sample()?)? } else { actions }; actions.squeeze(0)?.to_scalar::<f32>() } pub fn train(&mut self, batch_size: usize) -> Result<()> { let (states, actions, rewards, next_states, _, _) = match self.replay_buffer.random_batch(batch_size)? { Some(v) => v, _ => return Ok(()), }; let q_target = self .critic .target_forward(&next_states, &self.actor.target_forward(&next_states)?)?; let q_target = (rewards + (self.gamma * q_target)?.detach())?; let q = self.critic.forward(&states, &actions)?; let diff = (q_target - q)?; let critic_loss = diff.sqr()?.mean_all()?; self.critic_optim.backward_step(&critic_loss)?; let actor_loss = self .critic .forward(&states, &self.actor.forward(&states)?)? .mean_all()? .neg()?; self.actor_optim.backward_step(&actor_loss)?; self.critic.track(self.tau)?; self.actor.track(self.tau)?; Ok(()) } } // The impact of the q value of the next state on the current state's q value. const GAMMA: f64 = 0.99; // The weight for updating the target networks. 
const TAU: f64 = 0.005; // The capacity of the replay buffer used for sampling training data. const REPLAY_BUFFER_CAPACITY: usize = 100_000; // The training batch size for each training iteration. const TRAINING_BATCH_SIZE: usize = 100; // The total number of episodes. const MAX_EPISODES: usize = 100; // The maximum length of an episode. const EPISODE_LENGTH: usize = 200; // The number of training iterations after one episode finishes. const TRAINING_ITERATIONS: usize = 200; // Ornstein-Uhlenbeck process parameters. const MU: f64 = 0.0; const THETA: f64 = 0.15; const SIGMA: f64 = 0.1; const ACTOR_LEARNING_RATE: f64 = 1e-4; const CRITIC_LEARNING_RATE: f64 = 1e-3; pub fn run() -> Result<()> { let env = GymEnv::new("Pendulum-v1")?; println!("action space: {}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let size_state = env.observation_space().iter().product::<usize>(); let size_action = env.action_space(); let mut agent = DDPG::new( &Device::Cpu, size_state, size_action, true, ACTOR_LEARNING_RATE, CRITIC_LEARNING_RATE, GAMMA, TAU, REPLAY_BUFFER_CAPACITY, OuNoise::new(MU, THETA, SIGMA, size_action)?, )?; let mut rng = rand::thread_rng(); for episode in 0..MAX_EPISODES { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; agent.remember( &state, &Tensor::new(vec![action], &Device::Cpu)?, &Tensor::new(vec![step.reward as f32], &Device::Cpu)?, &step.state, step.terminated, step.truncated, ); if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); for _ in 0..TRAINING_ITERATIONS { agent.train(TRAINING_BATCH_SIZE)?; } } println!("Testing..."); agent.train = false; for episode in 0..10 { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/ddpg.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/ddpg.rs", "repo_id": "candle", "token_count": 8524 }
23
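The target networks in the DDPG example above are blended toward the online networks with the small TAU constant after every training step (and with tau = 1.0 once at construction time, via the `track` helper used in the code). A minimal stand-alone sketch of that soft ("Polyak") update, written over plain slices instead of candle Vars, so treat it as an illustration of the rule rather than the `track` implementation itself:

// theta_target <- tau * theta + (1 - tau) * theta_target
fn soft_update(target: &mut [f32], source: &[f32], tau: f32) {
    assert_eq!(target.len(), source.len());
    for (t, s) in target.iter_mut().zip(source.iter()) {
        *t = tau * s + (1.0 - tau) * *t;
    }
}

fn main() {
    let source = vec![1.0_f32, 2.0, 3.0];
    let mut target = vec![0.0_f32; 3];
    // tau = 1.0 makes the target an exact copy, which is how the networks start out;
    // the small TAU = 0.005 then lets the target trail the online network slowly.
    soft_update(&mut target, &source, 1.0);
    assert_eq!(target, source);
    soft_update(&mut target, &[2.0, 3.0, 4.0], 0.005);
    println!("{target:?}");
}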
[ { "index": 1, "color": "#787878", "label": "wall" }, { "index": 2, "color": "#B47878", "label": "building;edifice" }, { "index": 3, "color": "#06E6E6", "label": "sky" }, { "index": 4, "color": "#503232", "label": "floor;flooring" }, { "index": 5, "color": "#04C803", "label": "tree" }, { "index": 6, "color": "#787850", "label": "ceiling" }, { "index": 7, "color": "#8C8C8C", "label": "road;route" }, { "index": 8, "color": "#CC05FF", "label": "bed" }, { "index": 9, "color": "#E6E6E6", "label": "windowpane;window" }, { "index": 10, "color": "#04FA07", "label": "grass" }, { "index": 11, "color": "#E005FF", "label": "cabinet" }, { "index": 12, "color": "#EBFF07", "label": "sidewalk;pavement" }, { "index": 13, "color": "#96053D", "label": "person;individual;someone;somebody;mortal;soul" }, { "index": 14, "color": "#787846", "label": "earth;ground" }, { "index": 15, "color": "#08FF33", "label": "door;double;door" }, { "index": 16, "color": "#FF0652", "label": "table" }, { "index": 17, "color": "#8FFF8C", "label": "mountain;mount" }, { "index": 18, "color": "#CCFF04", "label": "plant;flora;plant;life" }, { "index": 19, "color": "#FF3307", "label": "curtain;drape;drapery;mantle;pall" }, { "index": 20, "color": "#CC4603", "label": "chair" }, { "index": 21, "color": "#0066C8", "label": "car;auto;automobile;machine;motorcar" }, { "index": 22, "color": "#3DE6FA", "label": "water" }, { "index": 23, "color": "#FF0633", "label": "painting;picture" }, { "index": 24, "color": "#0B66FF", "label": "sofa;couch;lounge" }, { "index": 25, "color": "#FF0747", "label": "shelf" }, { "index": 26, "color": "#FF09E0", "label": "house" }, { "index": 27, "color": "#0907E6", "label": "sea" }, { "index": 28, "color": "#DCDCDC", "label": "mirror" }, { "index": 29, "color": "#FF095C", "label": "rug;carpet;carpeting" }, { "index": 30, "color": "#7009FF", "label": "field" }, { "index": 31, "color": "#08FFD6", "label": "armchair" }, { "index": 32, "color": "#07FFE0", "label": "seat" }, { "index": 33, "color": "#FFB806", "label": "fence;fencing" }, { "index": 34, "color": "#0AFF47", "label": "desk" }, { "index": 35, "color": "#FF290A", "label": "rock;stone" }, { "index": 36, "color": "#07FFFF", "label": "wardrobe;closet;press" }, { "index": 37, "color": "#E0FF08", "label": "lamp" }, { "index": 38, "color": "#6608FF", "label": "bathtub;bathing;tub;bath;tub" }, { "index": 39, "color": "#FF3D06", "label": "railing;rail" }, { "index": 40, "color": "#FFC207", "label": "cushion" }, { "index": 41, "color": "#FF7A08", "label": "base;pedestal;stand" }, { "index": 42, "color": "#00FF14", "label": "box" }, { "index": 43, "color": "#FF0829", "label": "column;pillar" }, { "index": 44, "color": "#FF0599", "label": "signboard;sign" }, { "index": 45, "color": "#0633FF", "label": "chest;of;drawers;chest;bureau;dresser" }, { "index": 46, "color": "#EB0CFF", "label": "counter" }, { "index": 47, "color": "#A09614", "label": "sand" }, { "index": 48, "color": "#00A3FF", "label": "sink" }, { "index": 49, "color": "#8C8C8C", "label": "skyscraper" }, { "index": 50, "color": "#FA0A0F", "label": "fireplace;hearth;open;fireplace" }, { "index": 51, "color": "#14FF00", "label": "refrigerator;icebox" }, { "index": 52, "color": "#1FFF00", "label": "grandstand;covered;stand" }, { "index": 53, "color": "#FF1F00", "label": "path" }, { "index": 54, "color": "#FFE000", "label": "stairs;steps" }, { "index": 55, "color": "#99FF00", "label": "runway" }, { "index": 56, "color": "#0000FF", "label": "case;display;case;showcase;vitrine" }, { "index": 57, "color": 
"#FF4700", "label": "pool;table;billiard;table;snooker;table" }, { "index": 58, "color": "#00EBFF", "label": "pillow" }, { "index": 59, "color": "#00ADFF", "label": "screen;door;screen" }, { "index": 60, "color": "#1F00FF", "label": "stairway;staircase" }, { "index": 61, "color": "#0BC8C8", "label": "river" }, { "index": 62, "color": "#FF5200", "label": "bridge;span" }, { "index": 63, "color": "#00FFF5", "label": "bookcase" }, { "index": 64, "color": "#003DFF", "label": "blind;screen" }, { "index": 65, "color": "#00FF70", "label": "coffee;table;cocktail;table" }, { "index": 66, "color": "#00FF85", "label": "toilet;can;commode;crapper;pot;potty;stool;throne" }, { "index": 67, "color": "#FF0000", "label": "flower" }, { "index": 68, "color": "#FFA300", "label": "book" }, { "index": 69, "color": "#FF6600", "label": "hill" }, { "index": 70, "color": "#C2FF00", "label": "bench" }, { "index": 71, "color": "#008FFF", "label": "countertop" }, { "index": 72, "color": "#33FF00", "label": "stove;kitchen;stove;range;kitchen;range;cooking;stove" }, { "index": 73, "color": "#0052FF", "label": "palm;palm;tree" }, { "index": 74, "color": "#00FF29", "label": "kitchen;island" }, { "index": 75, "color": "#00FFAD", "label": "computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system" }, { "index": 76, "color": "#0A00FF", "label": "swivel;chair" }, { "index": 77, "color": "#ADFF00", "label": "boat" }, { "index": 78, "color": "#00FF99", "label": "bar" }, { "index": 79, "color": "#FF5C00", "label": "arcade;machine" }, { "index": 80, "color": "#FF00FF", "label": "hovel;hut;hutch;shack;shanty" }, { "index": 81, "color": "#FF00F5", "label": "bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle" }, { "index": 82, "color": "#FF0066", "label": "towel" }, { "index": 83, "color": "#FFAD00", "label": "light;light;source" }, { "index": 84, "color": "#FF0014", "label": "truck;motortruck" }, { "index": 85, "color": "#FFB8B8", "label": "tower" }, { "index": 86, "color": "#001FFF", "label": "chandelier;pendant;pendent" }, { "index": 87, "color": "#00FF3D", "label": "awning;sunshade;sunblind" }, { "index": 88, "color": "#0047FF", "label": "streetlight;street;lamp" }, { "index": 89, "color": "#FF00CC", "label": "booth;cubicle;stall;kiosk" }, { "index": 90, "color": "#00FFC2", "label": "television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box" }, { "index": 91, "color": "#00FF52", "label": "airplane;aeroplane;plane" }, { "index": 92, "color": "#000AFF", "label": "dirt;track" }, { "index": 93, "color": "#0070FF", "label": "apparel;wearing;apparel;dress;clothes" }, { "index": 94, "color": "#3300FF", "label": "pole" }, { "index": 95, "color": "#00C2FF", "label": "land;ground;soil" }, { "index": 96, "color": "#007AFF", "label": "bannister;banister;balustrade;balusters;handrail" }, { "index": 97, "color": "#00FFA3", "label": "escalator;moving;staircase;moving;stairway" }, { "index": 98, "color": "#FF9900", "label": "ottoman;pouf;pouffe;puff;hassock" }, { "index": 99, "color": "#00FF0A", "label": "bottle" }, { "index": 100, "color": "#FF7000", "label": "buffet;counter;sideboard" }, { "index": 101, "color": "#8FFF00", "label": "poster;posting;placard;notice;bill;card" }, { "index": 102, "color": "#5200FF", "label": "stage" }, { "index": 103, "color": "#A3FF00", "label": "van" }, { "index": 104, "color": "#FFEB00", "label": "ship" }, { "index": 105, "color": "#08B8AA", "label": "fountain" }, { "index": 106, 
"color": "#8500FF", "label": "conveyer;belt;conveyor;belt;conveyer;conveyor;transporter" }, { "index": 107, "color": "#00FF5C", "label": "canopy" }, { "index": 108, "color": "#B800FF", "label": "washer;automatic;washer;washing;machine" }, { "index": 109, "color": "#FF001F", "label": "plaything;toy" }, { "index": 110, "color": "#00B8FF", "label": "swimming;pool;swimming;bath;natatorium" }, { "index": 111, "color": "#00D6FF", "label": "stool" }, { "index": 112, "color": "#FF0070", "label": "barrel;cask" }, { "index": 113, "color": "#5CFF00", "label": "basket;handbasket" }, { "index": 114, "color": "#00E0FF", "label": "waterfall;falls" }, { "index": 115, "color": "#70E0FF", "label": "tent;collapsible;shelter" }, { "index": 116, "color": "#46B8A0", "label": "bag" }, { "index": 117, "color": "#A300FF", "label": "minibike;motorbike" }, { "index": 118, "color": "#9900FF", "label": "cradle" }, { "index": 119, "color": "#47FF00", "label": "oven" }, { "index": 120, "color": "#FF00A3", "label": "ball" }, { "index": 121, "color": "#FFCC00", "label": "food;solid;food" }, { "index": 122, "color": "#FF008F", "label": "step;stair" }, { "index": 123, "color": "#00FFEB", "label": "tank;storage;tank" }, { "index": 124, "color": "#85FF00", "label": "trade;name;brand;name;brand;marque" }, { "index": 125, "color": "#FF00EB", "label": "microwave;microwave;oven" }, { "index": 126, "color": "#F500FF", "label": "pot;flowerpot" }, { "index": 127, "color": "#FF007A", "label": "animal;animate;being;beast;brute;creature;fauna" }, { "index": 128, "color": "#FFF500", "label": "bicycle;bike;wheel;cycle" }, { "index": 129, "color": "#0ABED4", "label": "lake" }, { "index": 130, "color": "#D6FF00", "label": "dishwasher;dish;washer;dishwashing;machine" }, { "index": 131, "color": "#00CCFF", "label": "screen;silver;screen;projection;screen" }, { "index": 132, "color": "#1400FF", "label": "blanket;cover" }, { "index": 133, "color": "#FFFF00", "label": "sculpture" }, { "index": 134, "color": "#0099FF", "label": "hood;exhaust;hood" }, { "index": 135, "color": "#0029FF", "label": "sconce" }, { "index": 136, "color": "#00FFCC", "label": "vase" }, { "index": 137, "color": "#2900FF", "label": "traffic;light;traffic;signal;stoplight" }, { "index": 138, "color": "#29FF00", "label": "tray" }, { "index": 139, "color": "#AD00FF", "label": "ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin" }, { "index": 140, "color": "#00F5FF", "label": "fan" }, { "index": 141, "color": "#4700FF", "label": "pier;wharf;wharfage;dock" }, { "index": 142, "color": "#7A00FF", "label": "crt;screen" }, { "index": 143, "color": "#00FFB8", "label": "plate" }, { "index": 144, "color": "#005CFF", "label": "monitor;monitoring;device" }, { "index": 145, "color": "#B8FF00", "label": "bulletin;board;notice;board" }, { "index": 146, "color": "#0085FF", "label": "shower" }, { "index": 147, "color": "#FFD600", "label": "radiator" }, { "index": 148, "color": "#19C2C2", "label": "glass;drinking;glass" }, { "index": 149, "color": "#66FF00", "label": "clock" }, { "index": 150, "color": "#5C00FF", "label": "flag" } ]
candle/candle-examples/examples/segformer/assets/labels.json/0
{ "file_path": "candle/candle-examples/examples/segformer/assets/labels.json", "repo_id": "candle", "token_count": 6397 }
24
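Each entry in the palette above pairs a 1-based class index with a hex colour and a label (the ADE20K classes used by the segformer example). A small sketch of turning such an entry into an RGB triple for colouring a segmentation map; the two sample rows are copied from the JSON, and how a model's argmax class id lines up with the 1-based "index" field is left as an assumption here:

fn hex_to_rgb(hex: &str) -> Option<(u8, u8, u8)> {
    let h = hex.strip_prefix('#')?;
    if h.len() != 6 {
        return None;
    }
    let r = u8::from_str_radix(&h[0..2], 16).ok()?;
    let g = u8::from_str_radix(&h[2..4], 16).ok()?;
    let b = u8::from_str_radix(&h[4..6], 16).ok()?;
    Some((r, g, b))
}

fn main() {
    // Two entries lifted from the JSON above; the real file has 150 classes.
    let palette = [(1u32, "#787878", "wall"), (3, "#06E6E6", "sky")];
    for (index, color, label) in palette {
        let (r, g, b) = hex_to_rgb(color).expect("well-formed hex colour");
        println!("class {index} ({label}) -> rgb({r}, {g}, {b})");
    }
}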
use symphonia::core::audio::{AudioBufferRef, Signal};
use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL};
use symphonia::core::conv::FromSample;

fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>)
where
    T: symphonia::core::sample::Sample,
    f32: symphonia::core::conv::FromSample<T>,
{
    samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v)))
}

pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> anyhow::Result<(Vec<f32>, u32)> {
    // Open the media source.
    let src = std::fs::File::open(path)?;

    // Create the media source stream.
    let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default());

    // Create a probe hint using the file's extension. [Optional]
    let hint = symphonia::core::probe::Hint::new();

    // Use the default options for metadata and format readers.
    let meta_opts: symphonia::core::meta::MetadataOptions = Default::default();
    let fmt_opts: symphonia::core::formats::FormatOptions = Default::default();

    // Probe the media source.
    let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?;

    // Get the instantiated format reader.
    let mut format = probed.format;

    // Find the first audio track with a known (decodeable) codec.
    let track = format
        .tracks()
        .iter()
        .find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
        .expect("no supported audio tracks");

    // Use the default options for the decoder.
    let dec_opts: DecoderOptions = Default::default();

    // Create a decoder for the track.
    let mut decoder = symphonia::default::get_codecs()
        .make(&track.codec_params, &dec_opts)
        .expect("unsupported codec");
    let track_id = track.id;
    let sample_rate = track.codec_params.sample_rate.unwrap_or(0);
    let mut pcm_data = Vec::new();

    // The decode loop.
    while let Ok(packet) = format.next_packet() {
        // Consume any new metadata that has been read since the last packet.
        while !format.metadata().is_latest() {
            format.metadata().pop();
        }

        // If the packet does not belong to the selected track, skip over it.
        if packet.track_id() != track_id {
            continue;
        }
        match decoder.decode(&packet)? {
            AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)),
            AudioBufferRef::U8(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U16(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U24(data) => conv(&mut pcm_data, data),
            AudioBufferRef::U32(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S8(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S16(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S24(data) => conv(&mut pcm_data, data),
            AudioBufferRef::S32(data) => conv(&mut pcm_data, data),
            AudioBufferRef::F64(data) => conv(&mut pcm_data, data),
        }
    }
    Ok((pcm_data, sample_rate))
}
candle/candle-examples/examples/whisper/pcm_decode.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/pcm_decode.rs", "repo_id": "candle", "token_count": 1267 }
25
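For the integer buffer variants, the `conv` helper above defers to symphonia's FromSample to rescale samples into f32. As a rough stand-alone illustration of what that amounts to for 16-bit signed PCM — the exact scaling is symphonia's, so treat the constant below as an approximation, not the library code:

fn i16_to_f32(samples: &[i16]) -> Vec<f32> {
    // Rescale 16-bit signed PCM to roughly [-1.0, 1.0].
    samples.iter().map(|&s| s as f32 / 32768.0).collect()
}

fn main() {
    let pcm = [0_i16, 16384, -32768, 32767];
    println!("{:?}", i16_to_f32(&pcm)); // [0.0, 0.5, -1.0, ~0.99997]
}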
use candle::{Result, Tensor};

// https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/data/audio_utils.py#L57
pub fn normalize_loudness(
    wav: &Tensor,
    sample_rate: u32,
    loudness_compressor: bool,
) -> Result<Tensor> {
    let energy = wav.sqr()?.mean_all()?.sqrt()?.to_vec0::<f32>()?;
    if energy < 2e-3 {
        return Ok(wav.clone());
    }
    let wav_array = wav.to_vec1::<f32>()?;
    let mut meter = crate::bs1770::ChannelLoudnessMeter::new(sample_rate);
    meter.push(wav_array.into_iter());
    let power = meter.as_100ms_windows();
    let loudness = match crate::bs1770::gated_mean(power) {
        None => return Ok(wav.clone()),
        Some(gp) => gp.loudness_lkfs() as f64,
    };
    let delta_loudness = -14. - loudness;
    let gain = 10f64.powf(delta_loudness / 20.);
    let wav = (wav * gain)?;
    if loudness_compressor {
        wav.tanh()
    } else {
        Ok(wav)
    }
}
candle/candle-examples/src/audio.rs/0
{ "file_path": "candle/candle-examples/src/audio.rs", "repo_id": "candle", "token_count": 458 }
26
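normalize_loudness targets -14 LUFS and converts the loudness gap, measured in dB, into a linear gain with 10^(delta/20). A tiny worked example of just that gain computation, with made-up numbers:

fn loudness_gain(measured_lkfs: f64, target_lkfs: f64) -> f64 {
    // dB difference to linear amplitude factor.
    let delta_db = target_lkfs - measured_lkfs;
    10f64.powf(delta_db / 20.0)
}

fn main() {
    // A clip measured at -20 LKFS needs +6 dB, i.e. roughly a 2x amplitude gain.
    let gain = loudness_gain(-20.0, -14.0);
    println!("gain = {gain:.3}"); // ~1.995
    assert!((gain - 1.995).abs() < 1e-2);
}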
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include "cute/algorithm/copy.hpp" #include "cutlass/cutlass.h" #include "cutlass/layout/layout.h" #include <cutlass/numeric_types.h> using namespace cute; template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, typename elem_type=cutlass::half_t> struct Flash_kernel_traits { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using Element = elem_type; static constexpr bool Has_cp_async = true; #else using Element = cutlass::half_t; static constexpr bool Has_cp_async = false; #endif using ElementAccum = float; using index_t = uint32_t; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using MMA_Atom_Arch = std::conditional_t< std::is_same_v<elem_type, cutlass::half_t>, MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>, MMA_Atom<SM80_16x8x16_F32BF16BF16F32_TN> >; using ValLayoutMNK = Layout<Shape<_1, _2, _1>>; #else using MMA_Atom_Arch = MMA_Atom<SM75_16x8x8_F32F16F16F32_TN>; using ValLayoutMNK = Layout<Shape<_1, _2, _2>>; #endif #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<SM75_U16x8_LDSM_T, elem_type>; #else using SmemCopyAtom = Copy_Atom<DefaultCopy, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<DefaultCopy, elem_type>; #endif }; // If Share_Q_K_smem is true, that forces Is_Q_in_regs to be true template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, bool Is_Q_in_regs_=false, bool Share_Q_K_smem_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_fwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Share_Q_K_smem = Share_Q_K_smem_; static constexpr bool Is_Q_in_regs = Is_Q_in_regs_ || Share_Q_K_smem; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; using TiledMma = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<kNWarps>,_1,_1>>, // 4x1x1 or 8x1x1 thread group typename Base::ValLayoutMNK>; // 1x2x1 or 1x2x2 value group for 16x16x16 MMA and LDSM using SmemLayoutAtomQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, // This has to be kBlockKSmem, using kHeadDim gives wrong results for d=128 Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQ = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemLayoutKV = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockN>, Int<kHeadDim>>{})); // This has to be kBlockN and not 8, otherwise we get wrong results for d=128 using SmemLayoutAtomVtransposedNoSwizzle = Layout<Shape<Int<kBlockKSmem>, Int<kBlockN>>, Stride<_1, Int<kBlockKSmem>>>; using SmemLayoutAtomVtransposed = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, SmemLayoutAtomVtransposedNoSwizzle{})); using SmemLayoutVtransposed = decltype(tile_to_shape( SmemLayoutAtomVtransposed{}, Shape<Int<kHeadDim>, Int<kBlockN>>{})); // Maybe the VtransposeNoSwizzle just needs to have the right shape // And the strides don't matter? using SmemLayoutVtransposedNoSwizzle = decltype(tile_to_shape( SmemLayoutAtomVtransposedNoSwizzle{}, Shape<Int<kHeadDim>, Int<kBlockN>>{})); // using SmemLayoutVtransposedNoSwizzle = decltype(SmemLayoutVtransposed{}.layout_fn()); using SmemLayoutAtomO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<8>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutO = decltype(tile_to_shape( SmemLayoutAtomO{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemCopyAtomO = Copy_Atom<DefaultCopy, Element>; using SmemCopyAtomOaccum = Copy_Atom<DefaultCopy, ElementAccum>; static constexpr int kSmemQCount = size(SmemLayoutQ{}); static constexpr int kSmemKVCount = size(SmemLayoutKV{}) * 2; static constexpr int kSmemQSize = kSmemQCount * sizeof(Element); static constexpr int kSmemKVSize = kSmemKVCount * sizeof(Element); static constexpr int kSmemSize = Share_Q_K_smem ? std::max(kSmemQSize, kSmemKVSize) : kSmemQSize + kSmemKVSize; static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem here is 6-10% faster than kBlockKGmem for d=128 because of bank conflicts. // For example, for d=128, smem is split into 2 "pages", each page takes care of columns // 0-63 and 64-127. If we have 16 threads per row for gmem read, when we write to smem, // thread 0 - 7 will write to the first page and thread 8 - 15 will write to the second page, // to the same banks. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, DefaultCopy >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopyO = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store static constexpr int kGmemThreadsPerRowP = kBlockN / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRowP == 0, "kNThreads must be a multiple of kGmemThreadsPerRowP"); using GmemLayoutAtomP = Layout<Shape <Int<kNThreads / kGmemThreadsPerRowP>, Int<kGmemThreadsPerRowP>>, Stride<Int<kGmemThreadsPerRowP>, _1>>; using GmemTiledCopyP = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, Element>{}, GmemLayoutAtomP{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomOaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_16, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_8, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopyOaccum = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{}, GmemLayoutAtomOaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemLayoutAtomRotcossin = GmemLayoutAtom; using GmemTiledCopyRotcossin = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<uint64_t>, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per load using GmemTiledCopyRotcossinCont = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per load }; // Is_V_in_regs is an option to reduce smem usage, but will increase register pressue. // No_double_buffer is another option to reduce smem usage, but will slow things down. template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, int AtomLayoutMSdP_=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=2, bool Is_V_in_regs_=false, bool No_double_buffer_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_bwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Is_V_in_regs = Is_V_in_regs_; static constexpr bool No_double_buffer = No_double_buffer_; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; static constexpr int AtomLayoutMSdP = AtomLayoutMSdP_; static_assert(kNWarps % AtomLayoutMSdP == 0); static_assert(kNWarps % AtomLayoutNdKV == 0); static_assert(kNWarps % AtomLayoutMdQ == 0); using TiledMmaSdP = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMSdP>, Int<kNWarps / AtomLayoutMSdP>, _1>>, typename Base::ValLayoutMNK>; // 1x2x1 or 1x2x2 value group for 16x16x16 MMA and LDSM using TiledMmadKV = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutNdKV>, Int<kNWarps / AtomLayoutNdKV>, _1>>, typename Base::ValLayoutMNK>; // 1x2x1 or 1x2x2 value group for 16x16x16 MMA and LDSM using TiledMmadQ = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMdQ>, Int<kNWarps / AtomLayoutMdQ>, _1>>, // 2x4x1 or 4x2x1 thread group typename Base::ValLayoutMNK>; // 1x2x1 or 1x2x2 value group for 16x16x16 MMA and LDSM using SmemLayoutAtomQdO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQdO = decltype(tile_to_shape( SmemLayoutAtomQdO{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using SmemLayoutAtomKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<kBlockM / kNWarps>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutKV = decltype(tile_to_shape( // SmemLayoutAtomQdO{}, SmemLayoutAtomKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemLayoutAtomKtransposedNoSwizzle = Layout<Shape<Int<kBlockKSmem>, Int<kBlockN>>, Stride<_1, Int<kBlockKSmem>>>; using SmemLayoutAtomKtransposed = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, SmemLayoutAtomKtransposedNoSwizzle{})); using SmemLayoutKtransposed = decltype(tile_to_shape( SmemLayoutAtomKtransposed{}, make_shape(Int<kHeadDim>{}, Int<kBlockN>{}))); // Maybe the KtransposeNoSwizzle just needs to have the right shape // And the strides don't matter? using SmemLayoutKtransposedNoSwizzle = decltype(tile_to_shape( SmemLayoutAtomKtransposedNoSwizzle{}, make_shape(Int<kHeadDim>{}, Int<kBlockN>{}))); // using SmemLayoutKtransposedNoSwizzle = decltype(SmemLayoutKtransposed{}.layout_fn()); // TODO: generalize to other values of kBlockN // TODO: what should be the Swizzle here? 3 is faster than 1, and 1 is faster than 2 // static constexpr int kPBlockN = kBlockN; static_assert(kBlockN >= 64); // TD [2023-03-19]: Idk why kPBlockN = 16 and kSwizzlePdS=3 is the fastest. static constexpr int kPBlockN = 64; static_assert(kPBlockN == 16 || kPBlockN == 32 || kPBlockN == 64); // static constexpr int kSwizzlePdS = kPBlockN == 16 ? 1 : (kPBlockN == 32 ? 
2 : 3); static constexpr int kSwizzlePdS = 3; using SmemLayoutAtomPdS = decltype( composition(Swizzle<kSwizzlePdS, 3, 3>{}, Layout<Shape<Int<kBlockM>, Int<kPBlockN>>, Stride<Int<kPBlockN>, _1>>{})); using SmemLayoutPdS = decltype(tile_to_shape( SmemLayoutAtomPdS{}, make_shape(Int<kBlockM>{}, Int<kBlockN>{}))); using SmemLayoutAtomPdStransposedNoSwizzle = Layout<Shape<Int<kPBlockN>, Int<kBlockM>>, Stride<_1, Int<kPBlockN>>>; using SmemLayoutAtomPdStransposed = decltype( composition(Swizzle<kSwizzlePdS, 3, 3>{}, SmemLayoutAtomPdStransposedNoSwizzle{})); using SmemLayoutPdStransposed = decltype(tile_to_shape( SmemLayoutAtomPdStransposed{}, make_shape(Int<kBlockN>{}, Int<kBlockM>{}))); using SmemLayoutPdStransposedNoSwizzle = decltype(tile_to_shape( SmemLayoutAtomPdStransposedNoSwizzle{}, make_shape(Int<kBlockN>{}, Int<kBlockM>{}))); // using SmemLayoutPdStransposedNoSwizzle = decltype(SmemLayoutPdStransposed{}.layout_fn()); using SmemCopyAtomPdS = Copy_Atom<DefaultCopy, elem_type>; using SmemLayoutAtomQdOtransposedNoSwizzle = Layout<Shape<Int<kBlockKSmem>, Int<kBlockM>>, Stride<_1, Int<kBlockKSmem>>>; using SmemLayoutAtomQdOtransposed = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, SmemLayoutAtomQdOtransposedNoSwizzle{})); using SmemLayoutQdOtransposed = decltype(tile_to_shape( SmemLayoutAtomQdOtransposed{}, make_shape(Int<kHeadDim>{}, Int<kBlockM>{}))); using SmemLayoutQdOtransposedNoSwizzle = decltype(tile_to_shape( SmemLayoutAtomQdOtransposedNoSwizzle{}, make_shape(Int<kHeadDim>{}, Int<kBlockM>{}))); // using SmemLayoutQdOtransposedNoSwizzle = decltype(SmemLayoutQdOtransposed{}.layout_fn()); using SmemLayoutAtomdKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdKV = decltype(tile_to_shape( SmemLayoutAtomdKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemCopyAtomdKV = Copy_Atom<DefaultCopy, elem_type>; using SmemLayoutAtomdQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdQ = decltype(tile_to_shape( SmemLayoutAtomdQ{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using SmemCopyAtomdQ = Copy_Atom<DefaultCopy, elem_type>; static constexpr int kSmemQdOCount = size(SmemLayoutQdO{}) * (No_double_buffer ? 2 : 3); // Double buffer for sQ static constexpr int kSmemKVCount = size(SmemLayoutKV{}) * 2; static constexpr int kSmemdSCount = size(SmemLayoutPdS{}); static constexpr int kSmemPCount = size(SmemLayoutPdS{}); static constexpr int kSmemdQCount = size(SmemLayoutdQ{}); static constexpr int kSmemQdOSize = kSmemQdOCount * sizeof(Element); static constexpr int kSmemKVSize = kSmemKVCount * sizeof(Element); static constexpr int kSmemdSSize = kSmemdSCount * sizeof(Element); static constexpr int kSmemPSize = kSmemPCount * sizeof(Element); static constexpr int kSmemdQSize = kSmemdQCount * sizeof(Element); static constexpr int kSmemSize = kSmemQdOSize + (!Is_V_in_regs ? kSmemKVSize + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize) : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize))); static constexpr int kSmemSize1colblock = kSmemQdOSize + (!Is_V_in_regs ? 
kSmemKVSize + kSmemdSSize + kSmemPSize : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + kSmemPSize)); static constexpr int kSmemSize1rowblock = kSmemQdOSize / 3 * 2 + kSmemKVSize / 2 * 3 + kSmemdSSize + kSmemPSize; static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem instead of kHeadDim here to avoid bank conflicts, but doesn't seem // to affect speed in practice. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, DefaultCopy >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, elem_type>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopydO = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydKV = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydQ = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomdQaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_32, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_16, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopydQaccum = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{}, GmemLayoutAtomdQaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemTiledCopydQaccumAtomicAdd = decltype( make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{}, Layout<Shape <_8, _32>, // Thread layout, 8 threads per row Stride<_32, _1>>{}, Layout<Shape < _1, _1>>{})); // Val layout, 1 val per store }; ////////////////////////////////////////////////////////////////////////////////////////////////////
candle/candle-flash-attn/kernels/kernel_traits.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_traits.h", "repo_id": "candle", "token_count": 9290 }
27
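For the forward traits, kSmemSize without Share_Q_K_smem is the Q tile plus the K and V tiles, all stored in 2-byte half/bf16 elements. A quick back-of-the-envelope check for one plausible configuration; the tile sizes below are an assumption for illustration, not necessarily what the flash-attn dispatcher picks for this head dimension:

fn main() {
    let (k_block_m, k_block_n, k_head_dim, elem_bytes) = (128usize, 64usize, 128usize, 2usize);
    // sQ is kBlockM x kHeadDim, sK and sV together are 2 x kBlockN x kHeadDim.
    let smem_q = k_block_m * k_head_dim * elem_bytes;
    let smem_kv = 2 * k_block_n * k_head_dim * elem_bytes;
    // sQ = 32768 B and sK+sV = 32768 B, i.e. 64 KiB in total for this configuration,
    // well under what an SM80 part can be configured to expose per block.
    println!(
        "sQ = {smem_q} B, sK+sV = {smem_kv} B, total = {} KiB",
        (smem_q + smem_kv) / 1024
    );
}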
#include "cuda_fp16.h" #include "cuda_bf16.h" // Table showing which features are supported on which compute capability // https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications // FIXME: the minimum compute capabilities are just guesses since the table is not specific enough #if (__CUDACC_VER_MAJOR__ < 12 || __CUDACC_VER_MINOR__ < 2) && __CUDA_ARCH__ < 800 __device__ __forceinline__ __half __hmax_nan(__half a, __half b) { return __hisnan(a) ? a : (__hisnan(b) ? b : __hmax(a, b)); } __device__ __forceinline__ __half __hmin_nan(__half a, __half b) { return __hisnan(a) ? a : (__hisnan(b) ? b : __hmin(a, b)); } #endif #if __CUDA_ARCH__ < 600 // Copied from https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif #if __CUDA_ARCH__ < 700 // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd // The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 __device__ __half atomicAdd(__half *address, __half val) { // unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); // unsigned int old = *address_as_ui; // unsigned int assumed; // bool unaligned = (size_t) address & 2; // do { // assumed = old; // unsigned int hsum; // hsum = unaligned ? (old >> 16) : (old & 0xffff); // hsum = __half_as_ushort(__ushort_as_half(hsum) + val); // old = atomicCAS(address_as_ui, assumed, // unaligned ? (old & 0xffff) | (hsum << 16) : (old & 0xffff0000) | hsum // ); // } while (assumed != old); // return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff)); } #endif __device__ __forceinline__ __half atomicMaxf(__half* address, __half val) { #if __CUDA_ARCH__ < 700 // On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; bool unaligned = (size_t) address & 2; do { assumed = old; unsigned int hmax; hmax = unaligned ? (old >> 16) : (old & 0xffff); hmax = __half_as_ushort(__hmax_nan(val, __ushort_as_half(hmax))); old = atomicCAS(address_as_ui, assumed, unaligned ? (old & 0xffff) | (hmax << 16) : (old & 0xffff0000) | hmax ); } while (assumed != old); return __ushort_as_half(unaligned ? 
(old >> 16) : (old & 0xffff)); #else // Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions unsigned short int* casted_address = (unsigned short int*)address; unsigned short int old = *casted_address; unsigned short int assumed; do { assumed = old; old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmax_nan(val, __ushort_as_half(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __ushort_as_half(old); #endif } // atomicMax is not implemented for floats, // solution copied https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda __device__ __forceinline__ float atomicMaxf(float * addr, float value) { if (signbit(value)) { return __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value))); } else { return __int_as_float(atomicMax((int *)addr, __float_as_int(value))); } } __device__ __forceinline__ double atomicMaxf(double * addr, double value) { if (signbit(value)) { return __longlong_as_double(atomicMin((unsigned long long int *)addr, __double_as_longlong(value))); } else { return __longlong_as_double(atomicMax((long long int *)addr, __double_as_longlong(value))); } } __device__ __forceinline__ __half atomicMinf(__half* address, __half val) { #if __CUDA_ARCH__ < 700 // On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery. // Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119 unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; bool unaligned = (size_t) address & 2; do { assumed = old; unsigned int hmin; hmin = unaligned ? (old >> 16) : (old & 0xffff); hmin = __half_as_ushort(__hmin_nan(val, __ushort_as_half(hmin))); old = atomicCAS(address_as_ui, assumed, unaligned ? (old & 0xffff) | (hmin << 16) : (old & 0xffff0000) | hmin ); } while (assumed != old); return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff)); #else // Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions unsigned short int* casted_address = (unsigned short int*)address; unsigned short int old = *casted_address; unsigned short int assumed; do { assumed = old; old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmin_nan(val, __ushort_as_half(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __ushort_as_half(old); #endif } // atomicMin is not implemented for floats, // solution copied https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda __device__ __forceinline__ float atomicMinf(float * addr, float value) { if (signbit(value)) { return __uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(value))); } else { return __int_as_float(atomicMin((int *)addr, __float_as_int(value))); } } __device__ __forceinline__ double atomicMinf(double * addr, double value) { if (signbit(value)) { return __longlong_as_double(atomicMax((unsigned long long int *)addr, __double_as_longlong(value))); } else { return __longlong_as_double(atomicMin((long long int *)addr, __double_as_longlong(value))); } }
candle/candle-kernels/src/compatibility.cuh/0
{ "file_path": "candle/candle-kernels/src/compatibility.cuh", "repo_id": "candle", "token_count": 2734 }
28
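The float atomicMaxf/atomicMinf fallbacks above lean on IEEE-754 bit ordering: non-negative floats compare the same way as their bit patterns read as signed integers, while for negative floats the raw unsigned ordering is reversed, which is why the signbit branch swaps a max for an unsigned min. A host-side sanity check of that property, in Rust rather than CUDA:

fn main() {
    let pos = [0.0f32, 1.5, 2.0, 1e30];
    for w in pos.windows(2) {
        assert!(w[0] < w[1]);
        // Non-negative floats order the same as their bits viewed as signed ints.
        assert!((w[0].to_bits() as i32) < (w[1].to_bits() as i32));
    }
    let neg = [-1.0f32, -2.0, -1e30];
    for w in neg.windows(2) {
        assert!(w[0] > w[1]);
        // The more negative float has the larger raw bit pattern, so a float max
        // over negative values becomes an unsigned integer min.
        assert!(w[0].to_bits() < w[1].to_bits());
    }
    println!("bit-pattern ordering matches float ordering as exploited by atomicMaxf");
}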
#include <metal_stdlib> using namespace metal; template<typename TYPENAME, typename INDEX_TYPENAME> METAL_FUNC void index( constant size_t &dst_size, constant size_t &left_size, constant size_t &src_dim_size, constant size_t &right_size, constant size_t &ids_size, const device TYPENAME *input, const device INDEX_TYPENAME *input_ids, device TYPENAME *output, uint tid [[ thread_position_in_grid ]] ) { if (tid >= dst_size) { return; } const size_t id_i = (tid / right_size) % ids_size; const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1)); const size_t right_rank_i = tid % right_size; const size_t left_rank_i = tid / right_size / ids_size; /* // Force prevent out of bounds indexing // since there doesn't seem to be a good way to force crash // No need to check for zero we're only allowing unsized. */ const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i; output[tid] = input[src_i]; } # define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \ kernel void NAME( \ constant size_t &dst_size, \ constant size_t &left_size, \ constant size_t &src_dim_size, \ constant size_t &right_size, \ constant size_t &ids_size, \ const device TYPENAME *input, \ const device INDEX_TYPENAME *input_ids, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ index<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \ } template<typename TYPENAME, typename INDEX_TYPENAME> METAL_FUNC void gather( constant size_t &dst_size, constant size_t &left_size, constant size_t &src_dim_size, constant size_t &right_size, constant size_t &ids_size, const device TYPENAME *input, const device INDEX_TYPENAME *input_ids, device TYPENAME *output, uint tid [[ thread_position_in_grid ]] ) { if (tid >= dst_size) { return; } const INDEX_TYPENAME input_i = input_ids[tid]; const size_t right_rank_i = tid % right_size; const size_t left_rank_i = tid / right_size / ids_size; const size_t src_i = (left_rank_i * src_dim_size + input_i) * right_size + right_rank_i; output[tid] = input[src_i]; } # define GATHER_OP(NAME, INDEX_TYPENAME, TYPENAME) \ kernel void NAME( \ constant size_t &dst_size, \ constant size_t &left_size, \ constant size_t &src_dim_size, \ constant size_t &right_size, \ constant size_t &ids_size, \ const device TYPENAME *input, \ const device INDEX_TYPENAME *input_ids, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ gather<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \ } template<typename TYPENAME, typename INDEX_TYPENAME> METAL_FUNC void scatter_add( constant size_t &dst_size, constant size_t &left_size, constant size_t &src_dim_size, constant size_t &right_size, constant size_t &dst_dim_size, const device TYPENAME *input, const device INDEX_TYPENAME *input_ids, device TYPENAME *output, uint tid [[ thread_position_in_grid ]] ) { if (tid >= dst_size) { return; } const size_t right_rank_i = tid % right_size; const size_t left_rank_i = tid / right_size; for (unsigned int j = 0; j < src_dim_size; ++j) { const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i; const INDEX_TYPENAME idx = input_ids[src_i]; const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i; output[dst_i] += input[src_i]; } } # define SCATTER_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \ kernel void NAME( \ constant size_t &dst_size, \ constant size_t &left_size, \ 
constant size_t &src_dim_size, \ constant size_t &right_size, \ constant size_t &dst_dim_size, \ const device TYPENAME *input, \ const device INDEX_TYPENAME *input_ids, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ scatter_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \ } template<typename TYPENAME, typename INDEX_TYPENAME> METAL_FUNC void index_add( constant size_t &dst_size, constant size_t &left_size, constant size_t &src_dim_size, constant size_t &right_size, constant size_t &dst_dim_size, constant size_t &ids_dim_size, const device TYPENAME *input, const device INDEX_TYPENAME *input_ids, device TYPENAME *output, uint tid [[ thread_position_in_grid ]] ) { if (tid >= dst_size) { return; } const size_t right_rank_i = tid % right_size; const size_t left_rank_i = tid / right_size; for (unsigned int j = 0; j < ids_dim_size; ++j) { const INDEX_TYPENAME idx = input_ids[j]; const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i; const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i; output[dst_i] += input[src_i]; } } # define INDEX_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \ kernel void NAME( \ constant size_t &dst_size, \ constant size_t &left_size, \ constant size_t &src_dim_size, \ constant size_t &right_size, \ constant size_t &dst_dim_size, \ constant size_t &ids_dim_size, \ const device TYPENAME *input, \ const device INDEX_TYPENAME *input_ids, \ device TYPENAME *output, \ uint tid [[ thread_position_in_grid ]] \ ) { \ index_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, input, input_ids, output, tid); \ } INDEX_OP(is_u32_f32, uint, float) INDEX_OP(is_u32_f16, uint, half) #if defined(__HAVE_BFLOAT__) INDEX_OP(is_u32_bf16, uint32_t, bfloat) INDEX_OP(is_u8_bf16, uint8_t, bfloat) #endif GATHER_OP(gather_u32_f32, uint, float) GATHER_OP(gather_u32_f16, uint, half) SCATTER_ADD_OP(sa_u32_f32, uint32_t, float) SCATTER_ADD_OP(sa_u8_f32, uint8_t, float) SCATTER_ADD_OP(sa_i64_f32, int64_t, float) SCATTER_ADD_OP(sa_u32_f16, uint32_t, half) SCATTER_ADD_OP(sa_u8_f16, uint8_t, half) SCATTER_ADD_OP(sa_i64_f16, int64_t, half) #if defined(__HAVE_BFLOAT__) SCATTER_ADD_OP(sa_u32_bf16, uint32_t, bfloat) SCATTER_ADD_OP(sa_u8_bf16, uint8_t, bfloat) SCATTER_ADD_OP(sa_i64_bf16, int64_t, bfloat) #endif // i64 INDEX_ADD_OP(ia_i64_f16, int64_t, half) INDEX_ADD_OP(ia_i64_f32, int64_t, float) INDEX_ADD_OP(ia_i64_i64, int64_t, int64_t) INDEX_ADD_OP(ia_i64_u32, int64_t, uint32_t) INDEX_ADD_OP(ia_i64_u8, int64_t, uint8_t) #if defined(__HAVE_BFLOAT__) INDEX_ADD_OP(ia_i64_bf16, int64_t, bfloat) #endif // u32 INDEX_ADD_OP(ia_u32_f16, uint32_t, half) INDEX_ADD_OP(ia_u32_f32, uint32_t, float) INDEX_ADD_OP(ia_u32_i64, uint32_t, int64_t) INDEX_ADD_OP(ia_u32_u32, uint32_t, uint32_t) INDEX_ADD_OP(ia_u32_u8, uint32_t, uint8_t) #if defined(__HAVE_BFLOAT__) INDEX_ADD_OP(ia_u32_bf16, uint32_t, bfloat) #endif // u8 INDEX_ADD_OP(ia_u8_f16, uint8_t, half) INDEX_ADD_OP(ia_u8_f32, uint8_t, float) INDEX_ADD_OP(ia_u8_i64, uint8_t, int64_t) INDEX_ADD_OP(ia_u8_u32, uint8_t, uint32_t) INDEX_ADD_OP(ia_u8_u8, uint8_t, uint8_t) #if defined(__HAVE_BFLOAT__) INDEX_ADD_OP(ia_u8_bf16, uint8_t, bfloat) #endif
candle/candle-metal-kernels/src/indexing.metal/0
{ "file_path": "candle/candle-metal-kernels/src/indexing.metal", "repo_id": "candle", "token_count": 3485 }
29
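The gather kernel maps each output element tid to one input element by viewing the input as (left, src_dim, right) and indexing the middle dimension with input_ids[tid]. A CPU reference of just that index arithmetic — it mirrors the kernel's math, not the Metal dispatch, and the shapes below are arbitrary:

fn gather_ref(
    input: &[f32],
    ids: &[usize],
    _left: usize,
    src_dim: usize,
    ids_dim: usize,
    right: usize,
) -> Vec<f32> {
    let mut out = vec![0f32; ids.len()];
    for tid in 0..out.len() {
        let right_i = tid % right;
        let left_i = tid / right / ids_dim;
        let idx = ids[tid];
        out[tid] = input[(left_i * src_dim + idx) * right + right_i];
    }
    out
}

fn main() {
    // left=1, src_dim=4, right=2; pick rows 3 and 0 along the middle dimension.
    let input: Vec<f32> = (0..8).map(|v| v as f32).collect();
    let ids = vec![3, 3, 0, 0]; // ids of shape (1, 2, 2), flattened
    let out = gather_ref(&input, &ids, 1, 4, 2, 2);
    assert_eq!(out, vec![6.0, 7.0, 0.0, 1.0]);
    println!("{out:?}");
}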
/// This example contains some simple benchmarks so that it's easy to run them in perf etc. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::quantized::GgmlType; use candle::{CpuStorage, Device, Layout, Module, Result, Shape, Tensor, D}; use clap::{Parser, Subcommand}; const CHECK_CONV2D: bool = false; trait Benchmark { type PreProcessData; type RunResult; fn preprocess() -> Result<Self::PreProcessData>; fn run_one(_: &Self::PreProcessData) -> Result<Self::RunResult>; const ITERS: usize; } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl candle::CustomOp1 for Im2Col { fn name(&self) -> &'static str { "im2col" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { let &Self { h_k, w_k, stride, dilation, padding, } = self; let (b, c, h, w) = layout.shape().dims4()?; let (h_out, w_out) = self.hw_out(h, w); let slice = storage.as_slice::<f32>()?; let src = &slice[layout.start_offset()..]; let mut dst = vec![0f32; b * h_out * w_out * c * h_k * w_k]; let (src_s0, src_s1, src_s2, src_s3) = { let s = layout.stride(); (s[0], s[1], s[2], s[3]) }; // TODO: provide specialized kernels for the common use cases. // - h_k = w_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * h_out * w_out * c * h_k * w_k; for h_idx in 0..h_out { let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k; for w_idx in 0..w_out { let dst_idx = dst_idx + w_idx * c * h_k * w_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * h_k * w_k; let src_idx = c_idx * src_s1 + src_idx; for h_k_idx in 0..h_k { let src_h = h_idx * stride + h_k_idx * dilation; if padding != 0 && (src_h < padding || src_h >= h + padding) { continue; } let src_h = src_h - padding; let src_idx = src_idx + src_h * src_s2; let dst_idx = dst_idx + h_k_idx * w_k; for w_k_idx in 0..w_k { let src_w = w_idx * stride + w_k_idx * dilation; if padding != 0 && (src_w < padding || src_w >= w + padding) { continue; } let src_w = src_w - padding; let src_idx = src_idx + src_w * src_s3; let dst_idx = dst_idx + w_k_idx; dst[dst_idx] = src[src_idx] } } } } } } let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b * h_out * w_out, c * h_k * w_k).into())) } } // Conv1d example as used in whisper. struct Conv1d; impl Benchmark for Conv1d { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (1, 384, 3000), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (384, 384, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.conv1d(&d.1, 0, 1, 1, 1) } const ITERS: usize = 5; } // Conv2d example as used in stable-diffusion. 
struct Conv2d; impl Benchmark for Conv2d { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.conv2d(&d.1, 0, 1, 1, 1) } const ITERS: usize = 5; } // Conv2d example as used in stable-diffusion, im2col implementation. struct Conv2dIm2Col; impl Benchmark for Conv2dIm2Col { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let inp = Tensor::randn(0f32, 1., (2, 320, 96, 96), &Device::Cpu)?; let w = Tensor::randn(0f32, 1., (320, 320, 3, 3), &Device::Cpu)?; Ok((inp, w)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { // d.0.conv2d(&d.1, 0, 1, 1, 1) let (b, _, h, w) = d.0.dims4()?; let (_, _, h_k, w_k) = d.1.dims4()?; let op = Im2Col { h_k, w_k, stride: 1, dilation: 1, padding: 0, }; let (h_out, w_out) = op.hw_out(h, w); let col = d.0.apply_op1_no_bwd(&op)?; let res = col.matmul(&d.1.flatten_from(1)?.t()?)?; let res = res .reshape((b, h_out, w_out, ()))? .permute((0, 3, 1, 2))? .contiguous()?; if CHECK_CONV2D { let res2 = d.0.conv2d(&d.1, op.padding, op.stride, op.dilation, 1); let diff = (&res - res2)?.sqr()?.mean_all()?; println!("{diff}"); } Ok(res) } const ITERS: usize = 5; } struct MatMul; impl Benchmark for MatMul { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1024, 1024), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.matmul(&d.1) } const ITERS: usize = 100; } struct MatVec; impl Benchmark for MatVec { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1024 * 4, 1024 * 4), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1024 * 4, 1), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.matmul(&d.1) } const ITERS: usize = 100; } // This benchmark is similar to: // https://github.com/ggerganov/llama.cpp/blob/master/examples/benchmark/benchmark-matmult.cpp struct QMatMul; impl Benchmark for QMatMul { type PreProcessData = (candle::quantized::QMatMul, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let zeros = vec![candle::quantized::k_quants::BlockQ4_0::zeros(); 4096 * 11008 / 32]; let mm = candle::quantized::QTensor::new( candle::quantized::QStorage::Cpu(Box::new(zeros)), (4096, 11008), )?; let mm = candle::quantized::QMatMul::from_qtensor(mm)?; let arg = Tensor::randn(0f32, 1., (128, 11008), &Device::Cpu)?; Ok((mm, arg)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { d.0.forward(&d.1) } const ITERS: usize = 100; } struct Cat; impl Benchmark for Cat { type PreProcessData = (Tensor, Tensor); type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { let lhs = Tensor::randn(0f32, 1., (1, 32, 2000, 128), &Device::Cpu)?; let rhs = Tensor::randn(0f32, 1., (1, 32, 1, 128), &Device::Cpu)?; Ok((lhs, rhs)) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { Tensor::cat(&[&d.0, &d.1], 2) } const ITERS: usize = 1000; } struct Softmax; impl Benchmark for Softmax { type PreProcessData = 
Tensor; type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { // Typical whisper tiny size. let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?; Ok(x) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { candle_nn::ops::softmax(d, D::Minus1) } const ITERS: usize = 100; } struct SoftmaxLastDim; impl Benchmark for SoftmaxLastDim { type PreProcessData = Tensor; type RunResult = Tensor; fn preprocess() -> Result<Self::PreProcessData> { // Typical whisper tiny size. let x = Tensor::randn(0f32, 1., (1, 6, 200, 1500), &Device::Cpu)?; Ok(x) } fn run_one(d: &Self::PreProcessData) -> Result<Self::RunResult> { candle_nn::ops::softmax_last_dim(d) } const ITERS: usize = 100; } fn run<B: Benchmark>(iters: Option<usize>) -> Result<()> { use std::hint::black_box; let iters = iters.unwrap_or(B::ITERS); let d = B::preprocess()?; let start = std::time::Instant::now(); for _iter in 0..iters { let _res = black_box(B::run_one(black_box(&d))?); } println!("{:?}", start.elapsed() / iters as u32); Ok(()) } #[derive(Subcommand, Debug, Clone)] enum Task { Conv1d, Conv2d, Conv2dIm2Col, Matmul, Matvec, Qmatmul, Softmax, SoftmaxLastDim, Cat, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// The benchmark to be run. #[command(subcommand)] task: Task, #[arg(long)] iters: Option<usize>, } fn main() -> Result<()> { let args = Args::parse(); match args.task { Task::Conv1d => run::<Conv1d>(args.iters)?, Task::Conv2d => run::<Conv2d>(args.iters)?, Task::Conv2dIm2Col => run::<Conv2dIm2Col>(args.iters)?, Task::Matmul => run::<MatMul>(args.iters)?, Task::Matvec => run::<MatVec>(args.iters)?, Task::Softmax => run::<Softmax>(args.iters)?, Task::SoftmaxLastDim => run::<SoftmaxLastDim>(args.iters)?, Task::Qmatmul => run::<QMatMul>(args.iters)?, Task::Cat => run::<Cat>(args.iters)?, } Ok(()) }
candle/candle-nn/examples/cpu_benchmarks.rs/0
{ "file_path": "candle/candle-nn/examples/cpu_benchmarks.rs", "repo_id": "candle", "token_count": 5543 }
30
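Im2Col::hw_out uses the standard convolution output-size formula, and the Conv2dIm2Col benchmark then multiplies the unfolded buffer against the flattened kernel. Evaluating that formula for the benchmark's shapes (96x96 input, 3x3 kernel, stride 1, dilation 1, no padding):

fn conv_out(dim: usize, k: usize, stride: usize, dilation: usize, padding: usize) -> usize {
    // Same expression as Im2Col::hw_out, per spatial dimension.
    (dim + 2 * padding - dilation * (k - 1) - 1) / stride + 1
}

fn main() {
    let (h, w, k) = (96, 96, 3);
    let (h_out, w_out) = (conv_out(h, k, 1, 1, 0), conv_out(w, k, 1, 1, 0));
    assert_eq!((h_out, w_out), (94, 94));
    // The unfolded "col" buffer is (b * h_out * w_out) rows by (c * k * k) columns,
    // i.e. 2 * 94 * 94 = 17672 rows of 320 * 9 = 2880 f32 values for this benchmark.
    println!("col: {} x {}", 2 * h_out * w_out, 320 * k * k);
}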
//! A sequential layer used to chain multiple layers and closures.
use candle::{Module, Result, Tensor};

/// A sequential layer combining multiple other layers.
pub struct Sequential {
    layers: Vec<Box<dyn Module>>,
}

/// Creates a new empty sequential layer.
pub fn seq() -> Sequential {
    Sequential { layers: vec![] }
}

impl Sequential {
    /// The number of sub-layers embedded in this layer.
    pub fn len(&self) -> i64 {
        self.layers.len() as i64
    }

    /// Returns true if this layer does not have any sub-layer.
    pub fn is_empty(&self) -> bool {
        self.layers.is_empty()
    }
}

impl Module for Sequential {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?
        }
        Ok(xs)
    }
}

impl Sequential {
    /// Appends a layer after all the current layers.
    #[allow(clippy::should_implement_trait)]
    pub fn add<M: Module + 'static>(mut self, layer: M) -> Self {
        self.layers.push(Box::new(layer));
        self
    }

    /// Appends a closure after all the current layers.
    pub fn add_fn<F>(self, f: F) -> Self
    where
        F: 'static + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
    {
        self.add(super::func(f))
    }

    /// Applies the forward pass and returns the output for each layer.
    pub fn forward_all(&self, xs: &Tensor) -> Result<Vec<Tensor>> {
        let mut vec = Vec::with_capacity(self.layers.len());
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?;
            vec.push(xs.clone())
        }
        Ok(vec)
    }
}
candle/candle-nn/src/sequential.rs/0
{ "file_path": "candle/candle-nn/src/sequential.rs", "repo_id": "candle", "token_count": 705 }
31
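A sketch of how this layer is typically assembled, mirroring the actor/critic builders in the DDPG example earlier: chain candle_nn layers and a closure, then call forward. Dimensions and variable names are illustrative, and it assumes the usual candle/candle_nn crate aliases and the seq/linear/VarBuilder re-exports used throughout the repository:

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{linear, seq, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    // A tiny two-layer MLP with a ReLU in between.
    let model = seq()
        .add(linear(4, 16, vb.pp("fc1"))?)
        .add_fn(|xs| xs.relu())
        .add(linear(16, 2, vb.pp("fc2"))?);
    let xs = Tensor::zeros((1, 4), DType::F32, &dev)?;
    let ys = model.forward(&xs)?;
    assert_eq!(ys.dims(), &[1, 2]);
    Ok(())
}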
// // WARNING: This file is automatically generated! Please edit onnx.in.proto. // // SPDX-License-Identifier: Apache-2.0 syntax = "proto3"; package onnx; // Overview // // ONNX is an open specification that is comprised of the following components: // // 1) A definition of an extensible computation graph model. // 2) Definitions of standard data types. // 3) Definitions of built-in operators. // // This document describes the syntax of models and their computation graphs, // as well as the standard data types. Together, they are referred to as the ONNX // Intermediate Representation, or 'IR' for short. // // The normative semantic specification of the ONNX IR is found in docs/IR.md. // Definitions of the built-in neural network operators may be found in docs/Operators.md. // Notes // // Protobuf compatibility // // To simplify framework compatibility, ONNX is defined using the subset of protobuf // that is compatible with both protobuf v2 and v3. This means that we do not use any // protobuf features that are only available in one of the two versions. // // Here are the most notable contortions we have to carry out to work around // these limitations: // // - No 'map' (added protobuf 3.0). We instead represent mappings as lists // of key-value pairs, where order does not matter and duplicates // are not allowed. // Versioning // // ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md // // To be compatible with both proto2 and proto3, we will use a version number // that is not defined by the default value but an explicit enum number. enum Version { // proto3 requires the first enum value to be zero. // We add this just to appease the compiler. _START_VERSION = 0; // The version field is always serialized and we will use it to store the // version that the graph is generated from. This helps us set up version // control. // For the IR, we are using simple numbers starting with 0x00000001, // which was the version we published on Oct 10, 2017. IR_VERSION_2017_10_10 = 0x0000000000000001; // IR_VERSION 2 published on Oct 30, 2017 // - Added type discriminator to AttributeProto to support proto3 users IR_VERSION_2017_10_30 = 0x0000000000000002; // IR VERSION 3 published on Nov 3, 2017 // - For operator versioning: // - Added new message OperatorSetIdProto // - Added opset_import in ModelProto // - For vendor extensions, added domain in NodeProto IR_VERSION_2017_11_3 = 0x0000000000000003; // IR VERSION 4 published on Jan 22, 2019 // - Relax constraint that initializers should be a subset of graph inputs // - Add type BFLOAT16 IR_VERSION_2019_1_22 = 0x0000000000000004; // IR VERSION 5 published on March 18, 2019 // - Add message TensorAnnotation. // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. IR_VERSION_2019_3_18 = 0x0000000000000005; // IR VERSION 6 published on Sep 19, 2019 // - Add support for sparse tensor constants stored in model. // - Add message SparseTensorProto // - Add sparse initializers IR_VERSION_2019_9_19 = 0x0000000000000006; // IR VERSION 7 published on May 8, 2020 // - Add support to allow function body graph to rely on multiple external opreator sets. // - Add a list to promote inference graph's initializers to global and // mutable variables. Global variables are visible in all graphs of the // stored models. // - Add message TrainingInfoProto to store initialization // method and training algorithm. 
The execution of TrainingInfoProto // can modify the values of mutable variables. // - Implicitly add inference graph into each TrainingInfoProto's algorithm. IR_VERSION_2020_5_8 = 0x0000000000000007; // IR VERSION 8 published on July 30, 2021 // Introduce TypeProto.SparseTensor // Introduce TypeProto.Optional // Added a list of FunctionProtos local to the model // Deprecated since_version and operator status from FunctionProto IR_VERSION_2021_7_30 = 0x0000000000000008; // IR VERSION 9 published on May 5, 2023 // Added AttributeProto to FunctionProto so that default attribute values can be set. // Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ. IR_VERSION = 0x0000000000000009; } // Attributes // // A named attribute containing either singular float, integer, string, graph, // and tensor values, or repeated float, integer, string, graph, and tensor values. // An AttributeProto MUST contain the name field, and *only one* of the // following content fields, effectively enforcing a C/C++ union equivalent. message AttributeProto { reserved 12, 16 to 19; reserved "v"; // Note: this enum is structurally identical to the OpSchema::AttrType // enum defined in schema.h. If you rev one, you likely need to rev the other. enum AttributeType { UNDEFINED = 0; FLOAT = 1; INT = 2; STRING = 3; TENSOR = 4; GRAPH = 5; SPARSE_TENSOR = 11; TYPE_PROTO = 13; FLOATS = 6; INTS = 7; STRINGS = 8; TENSORS = 9; GRAPHS = 10; SPARSE_TENSORS = 12; TYPE_PROTOS = 14; } // The name field MUST be present for this version of the IR. string name = 1; // namespace Attribute // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. // In this case, this AttributeProto does not contain data, and it's a reference of attribute // in parent scope. // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. string ref_attr_name = 21; // A human-readable documentation for this attribute. Markdown is allowed. string doc_string = 13; // The type field MUST be present for this version of the IR. // For 0.0.1 versions of the IR, this field was not defined, and // implementations needed to use has_field heuristics to determine // which value field was in use. For IR_VERSION 0.0.2 or later, this // field MUST be set and match the f|i|s|t|... field in use. This // change was made to accommodate proto3 implementations. AttributeType type = 20; // discriminator that indicates which field below is in use // Exactly ONE of the following fields must be present for this version of the IR float f = 2; // float int64 i = 3; // int bytes s = 4; // UTF-8 string TensorProto t = 5; // tensor value GraphProto g = 6; // graph SparseTensorProto sparse_tensor = 22; // sparse tensor value // Do not use field below, it's deprecated. // optional ValueProto v = 12; // value - subsumes everything but graph TypeProto tp = 14; // type proto repeated float floats = 7; // list of floats repeated int64 ints = 8; // list of ints repeated bytes strings = 9; // list of UTF-8 strings repeated TensorProto tensors = 10; // list of tensors repeated GraphProto graphs = 11; // list of graph repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors repeated TypeProto type_protos = 15;// list of type protos } // Defines information on value, including the name, the type, and // the shape of the value. message ValueInfoProto { // This field MUST be present in this version of the IR. 
string name = 1; // namespace Value // This field MUST be present in this version of the IR for // inputs and outputs of the top-level graph. TypeProto type = 2; // A human-readable documentation for this value. Markdown is allowed. string doc_string = 3; } // Nodes // // Computation graphs are made up of a DAG of nodes, which represent what is // commonly called a "layer" or "pipeline stage" in machine learning frameworks. // // For example, it can be a node of type "Conv" that takes in an image, a filter // tensor and a bias tensor, and produces the convolved output. message NodeProto { repeated string input = 1; // namespace Value repeated string output = 2; // namespace Value // An optional identifier for this node in a graph. // This field MAY be absent in ths version of the IR. string name = 3; // namespace Node // The symbolic identifier of the Operator to execute. string op_type = 4; // namespace Operator // The domain of the OperatorSet that specifies the operator named by op_type. string domain = 7; // namespace Domain // Additional named attributes. repeated AttributeProto attribute = 5; // A human-readable documentation for this node. Markdown is allowed. string doc_string = 6; } // Training information // TrainingInfoProto stores information for training a model. // In particular, this defines two functionalities: an initialization-step // and a training-algorithm-step. Initialization resets the model // back to its original state as if no training has been performed. // Training algorithm improves the model based on input data. // // The semantics of the initialization-step is that the initializers // in ModelProto.graph and in TrainingInfoProto.algorithm are first // initialized as specified by the initializers in the graph, and then // updated by the "initialization_binding" in every instance in // ModelProto.training_info. // // The field "algorithm" defines a computation graph which represents a // training algorithm's step. After the execution of a // TrainingInfoProto.algorithm, the initializers specified by "update_binding" // may be immediately updated. If the targeted training algorithm contains // consecutive update steps (such as block coordinate descent methods), // the user needs to create a TrainingInfoProto for each step. message TrainingInfoProto { // This field describes a graph to compute the initial tensors // upon starting the training process. Initialization graph has no input // and can have multiple outputs. Usually, trainable tensors in neural // networks are randomly initialized. To achieve that, for each tensor, // the user can put a random number operator such as RandomNormal or // RandomUniform in TrainingInfoProto.initialization.node and assign its // random output to the specific tensor using "initialization_binding". // This graph can also set the initializers in "algorithm" in the same // TrainingInfoProto; a use case is resetting the number of training // iteration to zero. // // By default, this field is an empty graph and its evaluation does not // produce any output. Thus, no initializer would be changed by default. GraphProto initialization = 1; // This field represents a training algorithm step. Given required inputs, // it computes outputs to update initializers in its own or inference graph's // initializer lists. In general, this field contains loss node, gradient node, // optimizer node, increment of iteration count. 
// // An execution of the training algorithm step is performed by executing the // graph obtained by combining the inference graph (namely "ModelProto.graph") // and the "algorithm" graph. That is, the actual // input/initializer/output/node/value_info/sparse_initializer list of // the training graph is the concatenation of // "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer" // and "algorithm.input/initializer/output/node/value_info/sparse_initializer" // in that order. This combined graph must satisfy the normal ONNX conditions. // Now, let's provide a visualization of graph combination for clarity. // Let the inference graph (i.e., "ModelProto.graph") be // tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d // and the "algorithm" graph be // tensor_d -> Add -> tensor_e // The combination process results // tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e // // Notice that an input of a node in the "algorithm" graph may reference the // output of a node in the inference graph (but not the other way round). Also, inference // node cannot reference inputs of "algorithm". With these restrictions, inference graph // can always be run independently without training information. // // By default, this field is an empty graph and its evaluation does not // produce any output. Evaluating the default training step never // update any initializers. GraphProto algorithm = 2; // This field specifies the bindings from the outputs of "initialization" to // some initializers in "ModelProto.graph.initializer" and // the "algorithm.initializer" in the same TrainingInfoProto. // See "update_binding" below for details. // // By default, this field is empty and no initializer would be changed // by the execution of "initialization". repeated StringStringEntryProto initialization_binding = 3; // Gradient-based training is usually an iterative procedure. In one gradient // descent iteration, we apply // // x = x - r * g // // where "x" is the optimized tensor, "r" stands for learning rate, and "g" is // gradient of "x" with respect to a chosen loss. To avoid adding assignments // into the training graph, we split the update equation into // // y = x - r * g // x = y // // The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To // tell that "y" should be assigned to "x", the field "update_binding" may // contain a key-value pair of strings, "x" (key of StringStringEntryProto) // and "y" (value of StringStringEntryProto). // For a neural network with multiple trainable (mutable) tensors, there can // be multiple key-value pairs in "update_binding". // // The initializers appears as keys in "update_binding" are considered // mutable variables. This implies some behaviors // as described below. // // 1. We have only unique keys in all "update_binding"s so that two // variables may not have the same name. This ensures that one // variable is assigned up to once. // 2. The keys must appear in names of "ModelProto.graph.initializer" or // "TrainingInfoProto.algorithm.initializer". // 3. The values must be output names of "algorithm" or "ModelProto.graph.output". // 4. Mutable variables are initialized to the value specified by the // corresponding initializer, and then potentially updated by // "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s. 
// // This field usually contains names of trainable tensors // (in ModelProto.graph), optimizer states such as momentums in advanced // stochastic gradient methods (in TrainingInfoProto.graph), // and number of training iterations (in TrainingInfoProto.graph). // // By default, this field is empty and no initializer would be changed // by the execution of "algorithm". repeated StringStringEntryProto update_binding = 4; } // Models // // ModelProto is a top-level file/container format for bundling a ML model and // associating its computation graph with metadata. // // The semantics of the model are described by the associated GraphProto's. message ModelProto { // The version of the IR this model targets. See Version enum above. // This field MUST be present. int64 ir_version = 1; // The OperatorSets this model relies on. // All ModelProtos MUST have at least one entry that // specifies which version of the ONNX OperatorSet is // being imported. // // All nodes in the ModelProto's graph will bind against the operator // with the same-domain/same-op_type operator with the HIGHEST version // in the referenced operator sets. repeated OperatorSetIdProto opset_import = 8; // The name of the framework or tool used to generate this model. // This field SHOULD be present to indicate which implementation/tool/framework // emitted the model. string producer_name = 2; // The version of the framework or tool used to generate this model. // This field SHOULD be present to indicate which implementation/tool/framework // emitted the model. string producer_version = 3; // Domain name of the model. // We use reverse domain names as name space indicators. For example: // `com.facebook.fair` or `com.microsoft.cognitiveservices` // // Together with `model_version` and GraphProto.name, this forms the unique identity of // the graph. string domain = 4; // The version of the graph encoded. See Version enum below. int64 model_version = 5; // A human-readable documentation for this model. Markdown is allowed. string doc_string = 6; // The parameterized graph that is evaluated to execute the model. GraphProto graph = 7; // Named metadata values; keys should be distinct. repeated StringStringEntryProto metadata_props = 14; // Training-specific information. Sequentially executing all stored // `TrainingInfoProto.algorithm`s and assigning their outputs following // the corresponding `TrainingInfoProto.update_binding`s is one training // iteration. Similarly, to initialize the model // (as if training hasn't happened), the user should sequentially execute // all stored `TrainingInfoProto.initialization`s and assigns their outputs // using `TrainingInfoProto.initialization_binding`s. // // If this field is empty, the training behavior of the model is undefined. repeated TrainingInfoProto training_info = 20; // A list of function protos local to the model. // // Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain". // In case of any conflicts the behavior (whether the model local functions are given higher priority, // or standard operator sets are given higher priotity or this is treated as error) is defined by // the runtimes. // // The operator sets imported by FunctionProto should be compatible with the ones // imported by ModelProto and other model local FunctionProtos. 
// Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto // or by 2 FunctionProtos then versions for the operator set may be different but, // the operator schema returned for op_type, domain, version combination // for both the versions should be same for every node in the function body. // // One FunctionProto can reference other FunctionProto in the model, however, recursive reference // is not allowed. repeated FunctionProto functions = 25; }; // StringStringEntryProto follows the pattern for cross-proto-version maps. // See https://developers.google.com/protocol-buffers/docs/proto3#maps message StringStringEntryProto { string key = 1; string value = 2; }; message TensorAnnotation { string tensor_name = 1; // <key, value> pairs to annotate tensor specified by <tensor_name> above. // The keys used in the mapping below must be pre-defined in ONNX spec. // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as // quantization parameter keys. repeated StringStringEntryProto quant_parameter_tensor_names = 2; } // Graphs // // A graph defines the computational logic of a model and is comprised of a parameterized // list of nodes that form a directed acyclic graph based on their inputs and outputs. // This is the equivalent of the "network" or "graph" in many deep learning // frameworks. message GraphProto { // The nodes in the graph, sorted topologically. repeated NodeProto node = 1; // The name of the graph. string name = 2; // namespace Graph // A list of named tensor values, used to specify constant inputs of the graph. // Each initializer (both TensorProto as well SparseTensorProto) MUST have a name. // The name MUST be unique across both initializer and sparse_initializer, // but the name MAY also appear in the input list. repeated TensorProto initializer = 5; // Initializers (see above) stored in sparse format. repeated SparseTensorProto sparse_initializer = 15; // A human-readable documentation for this graph. Markdown is allowed. string doc_string = 10; // The inputs and outputs of the graph. repeated ValueInfoProto input = 11; repeated ValueInfoProto output = 12; // Information for the values in the graph. The ValueInfoProto.name's // must be distinct. It is optional for a value to appear in value_info list. repeated ValueInfoProto value_info = 13; // This field carries information to indicate the mapping among a tensor and its // quantization parameter tensors. For example: // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. repeated TensorAnnotation quantization_annotation = 14; reserved 3, 4, 6 to 9; reserved "ir_version", "producer_version", "producer_tag", "domain"; } // Tensors // // A serialized tensor value. message TensorProto { enum DataType { UNDEFINED = 0; // Basic types. FLOAT = 1; // float UINT8 = 2; // uint8_t INT8 = 3; // int8_t UINT16 = 4; // uint16_t INT16 = 5; // int16_t INT32 = 6; // int32_t INT64 = 7; // int64_t STRING = 8; // string BOOL = 9; // bool // IEEE754 half-precision floating-point format (16 bits wide). // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. 
FLOAT16 = 10; DOUBLE = 11; UINT32 = 12; UINT64 = 13; COMPLEX64 = 14; // complex with float32 real and imaginary components COMPLEX128 = 15; // complex with float64 real and imaginary components // Non-IEEE floating-point format based on IEEE754 single-precision // floating-point number truncated to 16 bits. // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. BFLOAT16 = 16; // Non-IEEE floating-point format based on papers // FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433, // 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf. // Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear. // The computation usually happens inside a block quantize / dequantize // fused by the runtime. FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero // Future extensions go here. } // The shape of the tensor. repeated int64 dims = 1; // The data type of the tensor. // This field MUST have a valid TensorProto.DataType value int32 data_type = 2; // For very large tensors, we may want to store them in chunks, in which // case the following fields will specify the segment that is stored in // the current TensorProto. message Segment { int64 begin = 1; int64 end = 2; } Segment segment = 3; // Tensor content must be organized in row-major order. // // Depending on the data_type field, exactly one of the fields below with // name ending in _data is used to store the elements of the tensor. // For float and complex64 values // Complex64 tensors are encoded as a single array of floats, // with the real components appearing in odd numbered positions, // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. repeated float float_data = 4 [packed = true]; // For int32, uint8, int8, uint16, int16, bool, float8, and float16 values // float16 and float8 values must be bit-wise converted to an uint16_t prior // to writing to the buffer. // When this field is present, the data_type field MUST be // INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ repeated int32 int32_data = 5 [packed = true]; // For strings. // Each element of string_data is a UTF-8 encoded Unicode // string. No trailing null, no leading BOM. The protobuf "string" // scalar type is not used to match ML community conventions. // When this field is present, the data_type field MUST be STRING repeated bytes string_data = 6; // For int64. // When this field is present, the data_type field MUST be INT64 repeated int64 int64_data = 7 [packed = true]; // Optionally, a name for the tensor. string name = 8; // namespace Value // A human-readable documentation for this tensor. Markdown is allowed. string doc_string = 12; // Serializations can either use one of the fields above, or use this // raw bytes field. The only exception is the string case, where one is // required to store the content in the repeated bytes string_data field. 
// // When this raw_data field is used to store tensor value, elements MUST // be stored in as fixed-width, little-endian order. // Floating-point data types MUST be stored in IEEE 754 format. // Complex64 elements must be written as two consecutive FLOAT values, real component first. // Complex128 elements must be written as two consecutive DOUBLE values, real component first. // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). // // Note: the advantage of specific field rather than the raw_data field is // that in some cases (e.g. int data), protobuf does a better packing via // variable length storage, and may lead to smaller binary footprint. // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED bytes raw_data = 9; // Data can be stored inside the protobuf file using type-specific fields or raw_data. // Alternatively, raw bytes data can be stored in an external file, using the external_data field. // external_data stores key-value pairs describing data location. Recognized keys are: // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX // protobuf model was stored // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. // - "length" (optional) - number of bytes containing data. Integer stored as string. // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. repeated StringStringEntryProto external_data = 13; // Location of the data for this tensor. MUST be one of: // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. // - EXTERNAL - data stored in an external location as described by external_data field. enum DataLocation { DEFAULT = 0; EXTERNAL = 1; } // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. DataLocation data_location = 14; // For double // Complex128 tensors are encoded as a single array of doubles, // with the real components appearing in odd numbered positions, // and the corresponding imaginary component appearing in the // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // is encoded as [1.0, 2.0 ,3.0 ,4.0] // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 repeated double double_data = 10 [packed = true]; // For uint64 and uint32 values // When this field is present, the data_type field MUST be // UINT32 or UINT64 repeated uint64 uint64_data = 11 [packed = true]; } // A serialized sparse-tensor value message SparseTensorProto { // The sequence of non-default values are encoded as a tensor of shape [NNZ]. // The default-value is zero for numeric tensors, and empty-string for string tensors. // values must have a non-empty name present which serves as a name for SparseTensorProto // when used in sparse_initializer list. TensorProto values = 1; // The indices of the non-default values, which may be stored in one of two formats. // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value // corresponding to the j-th index of the i-th value (in the values tensor). // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value // must be the linearized-index of the i-th value (in the values tensor). // The linearized-index can be converted into an index tuple (k_1,...,k_rank) // using the shape provided below. 
// The indices must appear in ascending order without duplication. // In the first format, the ordering is lexicographic-ordering: // e.g., index-value [1,4] must appear before [2,1] TensorProto indices = 2; // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] repeated int64 dims = 3; } // Defines a tensor shape. A dimension can be either an integer value // or a symbolic variable. A symbolic variable represents an unknown // dimension. message TensorShapeProto { message Dimension { oneof value { int64 dim_value = 1; string dim_param = 2; // namespace Shape }; // Standard denotation can optionally be used to denote tensor // dimensions with standard semantic descriptions to ensure // that operations are applied to the correct axis of a tensor. // Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition // for pre-defined dimension denotations. string denotation = 3; }; repeated Dimension dim = 1; } // Types // // The standard ONNX data types. message TypeProto { message Tensor { // This field MUST NOT have the value of UNDEFINED // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. int32 elem_type = 1; TensorShapeProto shape = 2; } // repeated T message Sequence { // The type and optional shape of each element of the sequence. // This field MUST be present for this version of the IR. TypeProto elem_type = 1; }; // map<K,V> message Map { // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING int32 key_type = 1; // This field MUST be present for this version of the IR. TypeProto value_type = 2; }; // wrapper for Tensor, Sequence, or Map message Optional { // The type and optional shape of the element wrapped. // This field MUST be present for this version of the IR. // Possible values correspond to OptionalProto.DataType enum TypeProto elem_type = 1; }; message SparseTensor { // This field MUST NOT have the value of UNDEFINED // This field MUST have a valid TensorProto.DataType value // This field MUST be present for this version of the IR. int32 elem_type = 1; TensorShapeProto shape = 2; } oneof value { // The type of a tensor. Tensor tensor_type = 1; // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values // as input and output to graphs and nodes. These types are needed to naturally // support classical ML operators. DNN operators SHOULD restrict their input // and output types to tensors. // The type of a sequence. Sequence sequence_type = 4; // The type of a map. Map map_type = 5; // The type of an optional. Optional optional_type = 9; // Type of the sparse tensor SparseTensor sparse_tensor_type = 8; } // An optional denotation can be used to denote the whole // type with a standard semantic description as to what is // stored inside. Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition // for pre-defined type denotations. string denotation = 6; } // Operator Sets // // OperatorSets are uniquely identified by a (domain, opset_version) pair. message OperatorSetIdProto { // The domain of the operator set being identified. // The empty string ("") or absence of this field implies the operator // set that is defined as part of the ONNX specification. // This field MUST be present in this version of the IR when referring to any other operator set. 
string domain = 1; // The version of the operator set being identified. // This field MUST be present in this version of the IR. int64 version = 2; } // Operator/function status. enum OperatorStatus { EXPERIMENTAL = 0; STABLE = 1; } message FunctionProto { // The name of the function, similar usage of op_type in OperatorProto. // Combined with FunctionProto.domain, this forms the unique identity of // the FunctionProto. string name = 1; // Deprecated since IR Version 8 // optional int64 since_version = 2; reserved 2; reserved "since_version"; // Deprecated since IR Version 8 // optional OperatorStatus status = 3; reserved 3; reserved "status"; // The inputs and outputs of the function. repeated string input = 4; repeated string output = 5; // The attribute parameters of the function. // It is for function parameters without default values. repeated string attribute = 6; // The attribute protos of the function. // It is for function attributes with default values. // A function attribute shall be represented either as // a string attribute or an AttributeProto, not both. repeated AttributeProto attribute_proto = 11; // The nodes in the function. repeated NodeProto node = 7; // A human-readable documentation for this function. Markdown is allowed. string doc_string = 8; // The OperatorSets this function body (graph) relies on. // // All nodes in the function body (graph) will bind against the operator // with the same-domain/same-op_type operator with the HIGHEST version // in the referenced operator sets. This means at most one version can be relied // for one domain. // // The operator sets imported by FunctionProto should be compatible with the ones // imported by ModelProto. Example, if same operator set say 'A' is imported by FunctionProto // and ModelProto then versions for the operator set may be different but, // the operator schema returned for op_type, domain, version combination // for both the versions should be same. repeated OperatorSetIdProto opset_import = 9; // The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of // the FunctionProto. string domain = 10; } // For using protobuf-lite option optimize_for = LITE_RUNTIME;
candle/candle-onnx/src/onnx.proto3/0
{ "file_path": "candle/candle-onnx/src/onnx.proto3", "repo_id": "candle", "token_count": 10183 }
32
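In candle, the schema above is compiled into Rust structs for the candle-onnx crate. The sketch below illustrates the AttributeProto convention spelled out in the comments — a name, a type discriminator, and exactly one populated value field — by assembling a Concat node with a single integer `axis` attribute. The module path `candle_onnx::onnx`, the `r#type` field name, and the `as i32` enum encoding follow prost's usual code-generation conventions and are assumptions here, not a confirmed API.

```rust
use candle_onnx::onnx; // assumed re-export of the prost-generated types

// Build a Concat node carrying one INT attribute, mirroring the
// "exactly one value field" rule stated in AttributeProto.
fn concat_node() -> onnx::NodeProto {
    let axis = onnx::AttributeProto {
        name: "axis".to_string(),
        r#type: onnx::attribute_proto::AttributeType::Int as i32,
        i: 0, // the single populated value field for an INT attribute
        ..Default::default()
    };
    onnx::NodeProto {
        op_type: "Concat".to_string(),
        input: vec!["a".to_string(), "b".to_string()],
        output: vec!["ab".to_string()],
        attribute: vec![axis],
        ..Default::default()
    }
}
```

A node like this would then sit in a GraphProto's `node` list alongside ValueInfoProto entries for `a`, `b`, and `ab`, wrapped in a ModelProto whose `opset_import` names the default ONNX operator set.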
# see https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/container.py from .module import Module from typing import ( Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union, ) from collections import OrderedDict, abc as container_abcs import operator from itertools import chain, islice __all__ = ["Sequential", "ModuleList", "ModuleDict"] T = TypeVar("T", bound=Module) def _addindent(s_: str, numSpaces: int): s = s_.split("\n") # don't do anything for single-line stuff if len(s) == 1: return s_ first = s.pop(0) s = [(numSpaces * " ") + line for line in s] s = "\n".join(s) s = first + "\n" + s return s class Sequential(Module): r"""A sequential container. Modules will be added to it in the order they are passed in the constructor. Alternatively, an ``OrderedDict`` of modules can be passed in. The ``forward()`` method of ``Sequential`` accepts any input and forwards it to the first module it contains. It then "chains" outputs to inputs sequentially for each subsequent module, finally returning the output of the last module. The value a ``Sequential`` provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the ``Sequential`` applies to each of the modules it stores (which are each a registered submodule of the ``Sequential``). What's the difference between a ``Sequential`` and a :class:`candle.nn.ModuleList`? A ``ModuleList`` is exactly what it sounds like--a list for storing ``Module`` s! On the other hand, the layers in a ``Sequential`` are connected in a cascading way. """ _modules: Dict[str, Module] # type: ignore[assignment] @overload def __init__(self, *args: Module) -> None: ... @overload def __init__(self, arg: "OrderedDict[str, Module]") -> None: ... 
def __init__(self, *args): super().__init__() if len(args) == 1 and isinstance(args[0], OrderedDict): for key, module in args[0].items(): self.add_module(key, module) else: for idx, module in enumerate(args): self.add_module(str(idx), module) def _get_item_by_idx(self, iterator, idx) -> T: """Get the idx-th item of the iterator""" size = len(self) idx = operator.index(idx) if not -size <= idx < size: raise IndexError("index {} is out of range".format(idx)) idx %= size return next(islice(iterator, idx, None)) def __getitem__(self, idx: Union[slice, int]) -> Union["Sequential", T]: if isinstance(idx, slice): return self.__class__(OrderedDict(list(self._modules.items())[idx])) else: return self._get_item_by_idx(self._modules.values(), idx) def __setitem__(self, idx: int, module: Module) -> None: key: str = self._get_item_by_idx(self._modules.keys(), idx) return setattr(self, key, module) def __delitem__(self, idx: Union[slice, int]) -> None: if isinstance(idx, slice): for key in list(self._modules.keys())[idx]: delattr(self, key) else: key = self._get_item_by_idx(self._modules.keys(), idx) delattr(self, key) # To preserve numbering str_indices = [str(i) for i in range(len(self._modules))] self._modules = OrderedDict(list(zip(str_indices, self._modules.values()))) def __len__(self) -> int: return len(self._modules) def __add__(self, other) -> "Sequential": if isinstance(other, Sequential): ret = Sequential() for layer in self: ret.append(layer) for layer in other: ret.append(layer) return ret else: raise ValueError( "add operator supports only objects " "of Sequential class, but {} is given.".format(str(type(other))) ) def pop(self, key: Union[int, slice]) -> Module: v = self[key] del self[key] return v def __iadd__(self, other) -> "Sequential": if isinstance(other, Sequential): offset = len(self) for i, module in enumerate(other): self.add_module(str(i + offset), module) return self else: raise ValueError( "add operator supports only objects " "of Sequential class, but {} is given.".format(str(type(other))) ) def __mul__(self, other: int) -> "Sequential": if not isinstance(other, int): raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}") elif other <= 0: raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}") else: combined = Sequential() offset = 0 for _ in range(other): for module in self: combined.add_module(str(offset), module) offset += 1 return combined def __rmul__(self, other: int) -> "Sequential": return self.__mul__(other) def __imul__(self, other: int) -> "Sequential": if not isinstance(other, int): raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}") elif other <= 0: raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}") else: len_original = len(self) offset = len(self) for _ in range(other - 1): for i in range(len_original): self.add_module(str(i + offset), self._modules[str(i)]) offset += len_original return self def __dir__(self): keys = super().__dir__() keys = [key for key in keys if not key.isdigit()] return keys def __iter__(self) -> Iterator[Module]: return iter(self._modules.values()) # NB: We can't really type check this function as the type of input # may change dynamically (as is tested in # TestScript.test_sequential_intermediary_types). 
Cannot annotate # with Any as TorchScript expects a more precise type def forward(self, input): for module in self: input = module(input) return input def append(self, module: Module) -> "Sequential": r"""Appends a given module to the end. Args: module (nn.Module): module to append """ self.add_module(str(len(self)), module) return self def insert(self, index: int, module: Module) -> "Sequential": if not isinstance(module, Module): raise AssertionError("module should be of type: {}".format(Module)) n = len(self._modules) if not (-n <= index <= n): raise IndexError("Index out of range: {}".format(index)) if index < 0: index += n for i in range(n, index, -1): self._modules[str(i)] = self._modules[str(i - 1)] self._modules[str(index)] = module return self def extend(self, sequential) -> "Sequential": for layer in sequential: self.append(layer) return self class ModuleList(Module): r"""Holds submodules in a list. :class:`~candle.nn.ModuleList` can be indexed like a regular Python list, but modules it contains are properly registered, and will be visible by all :class:`~candle.nn.Module` methods. Args: modules (iterable, optional): an iterable of modules to add Example:: class MyModule(nn.Module): def __init__(self): super().__init__() self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)]) def forward(self, x): # ModuleList can act as an iterable, or be indexed using ints for i, l in enumerate(self.linears): x = self.linears[i // 2](x) + l(x) return x """ _modules: Dict[str, Module] # type: ignore[assignment] def __init__(self, modules: Optional[Iterable[Module]] = None) -> None: super().__init__() if modules is not None: self += modules def _get_abs_string_index(self, idx): """Get the absolute index for the list of modules""" idx = operator.index(idx) if not (-len(self) <= idx < len(self)): raise IndexError("index {} is out of range".format(idx)) if idx < 0: idx += len(self) return str(idx) def __getitem__(self, idx: Union[int, slice]) -> Union[Module, "ModuleList"]: if isinstance(idx, slice): return self.__class__(list(self._modules.values())[idx]) else: return self._modules[self._get_abs_string_index(idx)] def __setitem__(self, idx: int, module: Module) -> None: idx = self._get_abs_string_index(idx) return setattr(self, str(idx), module) def __delitem__(self, idx: Union[int, slice]) -> None: if isinstance(idx, slice): for k in range(len(self._modules))[idx]: delattr(self, str(k)) else: delattr(self, self._get_abs_string_index(idx)) # To preserve numbering, self._modules is being reconstructed with modules after deletion str_indices = [str(i) for i in range(len(self._modules))] self._modules = OrderedDict(list(zip(str_indices, self._modules.values()))) def __len__(self) -> int: return len(self._modules) def __iter__(self) -> Iterator[Module]: return iter(self._modules.values()) def __iadd__(self, modules: Iterable[Module]) -> "ModuleList": return self.extend(modules) def __add__(self, other: Iterable[Module]) -> "ModuleList": combined = ModuleList() for i, module in enumerate(chain(self, other)): combined.add_module(str(i), module) return combined def __repr__(self): """A custom repr for ModuleList that compresses repeated module representations""" list_of_reprs = [repr(item) for item in self] if len(list_of_reprs) == 0: return self._get_name() + "()" start_end_indices = [[0, 0]] repeated_blocks = [list_of_reprs[0]] for i, r in enumerate(list_of_reprs[1:], 1): if r == repeated_blocks[-1]: start_end_indices[-1][1] += 1 continue start_end_indices.append([i, i]) 
repeated_blocks.append(r) lines = [] main_str = self._get_name() + "(" for (start_id, end_id), b in zip(start_end_indices, repeated_blocks): local_repr = f"({start_id}): {b}" # default repr if start_id != end_id: n = end_id - start_id + 1 local_repr = f"({start_id}-{end_id}): {n} x {b}" local_repr = _addindent(local_repr, 2) lines.append(local_repr) main_str += "\n " + "\n ".join(lines) + "\n" main_str += ")" return main_str def __dir__(self): keys = super().__dir__() keys = [key for key in keys if not key.isdigit()] return keys def insert(self, index: int, module: Module) -> None: r"""Insert a given module before a given index in the list. Args: index (int): index to insert. module (nn.Module): module to insert """ for i in range(len(self._modules), index, -1): self._modules[str(i)] = self._modules[str(i - 1)] self._modules[str(index)] = module def append(self, module: Module) -> "ModuleList": r"""Appends a given module to the end of the list. Args: module (nn.Module): module to append """ self.add_module(str(len(self)), module) return self def pop(self, key: Union[int, slice]) -> Module: v = self[key] del self[key] return v def extend(self, modules: Iterable[Module]) -> "ModuleList": r"""Appends modules from a Python iterable to the end of the list. Args: modules (iterable): iterable of modules to append """ if not isinstance(modules, container_abcs.Iterable): raise TypeError( "ModuleList.extend should be called with an " "iterable, but got " + type(modules).__name__ ) offset = len(self) for i, module in enumerate(modules): self.add_module(str(offset + i), module) return self # remove forward altogether to fallback on Module's _forward_unimplemented class ModuleDict(Module): r"""Holds submodules in a dictionary. :class:`~candle.nn.ModuleDict` can be indexed like a regular Python dictionary, but modules it contains are properly registered, and will be visible by all :class:`~candle.nn.Module` methods. :class:`~candle.nn.ModuleDict` is an **ordered** dictionary that respects * the order of insertion, and * in :meth:`~candle.nn.ModuleDict.update`, the order of the merged ``OrderedDict``, ``dict`` (started from Python 3.6) or another :class:`~candle.nn.ModuleDict` (the argument to :meth:`~candle.nn.ModuleDict.update`). Note that :meth:`~candle.nn.ModuleDict.update` with other unordered mapping types (e.g., Python's plain ``dict`` before Python version 3.6) does not preserve the order of the merged mapping. Args: modules (iterable, optional): a mapping (dictionary) of (string: module) or an iterable of key-value pairs of type (string, module) """ _modules: Dict[str, Module] # type: ignore[assignment] def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None: super().__init__() if modules is not None: self.update(modules) def __getitem__(self, key: str) -> Module: return self._modules[key] def __setitem__(self, key: str, module: Module) -> None: self.add_module(key, module) def __delitem__(self, key: str) -> None: del self._modules[key] def __len__(self) -> int: return len(self._modules) def __iter__(self) -> Iterator[str]: return iter(self._modules) def __contains__(self, key: str) -> bool: return key in self._modules def clear(self) -> None: """Remove all items from the ModuleDict.""" self._modules.clear() def pop(self, key: str) -> Module: r"""Remove key from the ModuleDict and return its module. 
Args: key (str): key to pop from the ModuleDict """ v = self[key] del self[key] return v def keys(self) -> Iterable[str]: r"""Return an iterable of the ModuleDict keys.""" return self._modules.keys() def items(self) -> Iterable[Tuple[str, Module]]: r"""Return an iterable of the ModuleDict key/value pairs.""" return self._modules.items() def values(self) -> Iterable[Module]: r"""Return an iterable of the ModuleDict values.""" return self._modules.values() def update(self, modules: Mapping[str, Module]) -> None: r"""Update the :class:`~candle.nn.ModuleDict` with the key-value pairs from a mapping or an iterable, overwriting existing keys. .. note:: If :attr:`modules` is an ``OrderedDict``, a :class:`~candle.nn.ModuleDict`, or an iterable of key-value pairs, the order of new elements in it is preserved. Args: modules (iterable): a mapping (dictionary) from string to :class:`~candle.nn.Module`, or an iterable of key-value pairs of type (string, :class:`~candle.nn.Module`) """ if not isinstance(modules, container_abcs.Iterable): raise TypeError( "ModuleDict.update should be called with an " "iterable of key/value pairs, but got " + type(modules).__name__ ) if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)): for key, module in modules.items(): self[key] = module else: # modules here can be a list with two items for j, m in enumerate(modules): if not isinstance(m, container_abcs.Iterable): raise TypeError( "ModuleDict update sequence element " "#" + str(j) + " should be Iterable; is" + type(m).__name__ ) if not len(m) == 2: raise ValueError( "ModuleDict update sequence element " "#" + str(j) + " has length " + str(len(m)) + "; 2 is required" ) # modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)] # that's too cumbersome to type correctly with overloads, so we add an ignore here self[m[0]] = m[1] # type: ignore[assignment] # remove forward altogether to fallback on Module's _forward_unimplemented
candle/candle-pyo3/py_src/candle/nn/container.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/container.py", "repo_id": "candle", "token_count": 7602 }
33
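The container.py above ports torch.nn's Sequential/ModuleList/ModuleDict semantics onto candle's Python Module class; candle-nn also ships a Rust-side Sequential (the sequential.rs row earlier in this dump). As a rough usage sketch, the snippet below chains two linear layers and a ReLU through candle-nn's `seq()` builder; the helper names (`seq`, `add`, `linear`, `Activation`) are used as commonly seen in candle-nn and should be checked against the crate version at hand.

```rust
use candle::{DType, Device, Result, Tensor};
use candle_nn::{seq, Activation, Module, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);

    // Chain layers much like the Python Sequential: each `add` appends a Module.
    let model = seq()
        .add(candle_nn::linear(10, 32, vb.pp("fc1"))?)
        .add(Activation::Relu)
        .add(candle_nn::linear(32, 2, vb.pp("fc2"))?);

    let xs = Tensor::zeros((4, 10), DType::F32, &dev)?;
    let ys = model.forward(&xs)?; // expected shape (4, 2)
    println!("{:?}", ys.shape());
    Ok(())
}
```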
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;

pub fn wrap_err(err: ::candle::Error) -> PyErr {
    PyErr::new::<PyValueError, _>(format!("{err:?}"))
}
candle/candle-pyo3/src/utils.rs/0
{ "file_path": "candle/candle-pyo3/src/utils.rs", "repo_id": "candle", "token_count": 74 }
34
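`wrap_err` is the glue that converts a `candle::Error` bubbling out of the bindings into a Python `ValueError`. A hypothetical call site is shown below; the function name and the choice of operation are illustrative only, the pattern is simply `.map_err(wrap_err)` on every fallible candle call inside a pyo3-exposed function.

```rust
use candle::{DType, Device, Tensor};
use pyo3::prelude::*;

// Hypothetical pyo3 binding living next to `wrap_err`: candle errors are
// mapped into PyErr so Python callers see a regular ValueError.
#[pyfunction]
fn mean_of_ones(n: usize) -> PyResult<f32> {
    let t = Tensor::ones(n, DType::F32, &Device::Cpu).map_err(wrap_err)?;
    let m = t.mean_all().map_err(wrap_err)?;
    m.to_scalar::<f32>().map_err(wrap_err)
}
```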
use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder}; fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> { let weight = vb.get(size, "weight")?; let bias = vb.get(size, "bias")?; Ok(LayerNorm::new(weight, bias, eps)) } fn make_causal_mask(t: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j <= i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; Ok(mask) } #[derive(Debug)] pub struct Config { pub vocab_size: usize, // max_position_embeddings aka n_positions pub max_position_embeddings: usize, // num_hidden_layers aka n_layer pub num_hidden_layers: usize, // hidden_size aka n_embd pub hidden_size: usize, pub layer_norm_epsilon: f64, pub n_inner: Option<usize>, // num_attention_heads aka n_head pub num_attention_heads: usize, pub multi_query: bool, pub use_cache: bool, } impl Config { #[allow(dead_code)] pub fn starcoder_1b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 24, hidden_size: 2048, layer_norm_epsilon: 1e-5, n_inner: Some(8192), num_attention_heads: 16, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_3b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 36, hidden_size: 2816, layer_norm_epsilon: 1e-5, n_inner: Some(11264), num_attention_heads: 22, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder_7b() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 42, hidden_size: 4096, layer_norm_epsilon: 1e-5, n_inner: Some(16384), num_attention_heads: 32, multi_query: true, use_cache: true, } } #[allow(dead_code)] pub fn starcoder() -> Self { Self { vocab_size: 49152, max_position_embeddings: 8192, num_hidden_layers: 40, hidden_size: 6144, layer_norm_epsilon: 1e-5, n_inner: Some(24576), num_attention_heads: 48, multi_query: true, use_cache: true, } } } struct Attention { c_attn: Linear, c_proj: Linear, kv_cache: Option<Tensor>, use_cache: bool, embed_dim: usize, kv_dim: usize, num_heads: usize, head_dim: usize, multi_query: bool, } impl Attention { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let head_dim = hidden_size / cfg.num_attention_heads; let kv_heads = if cfg.multi_query { 1 } else { cfg.num_attention_heads }; let kv_dim = kv_heads * head_dim; let c_attn = linear(hidden_size, hidden_size + 2 * kv_dim, true, vb.pp("c_attn"))?; let c_proj = linear(hidden_size, hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_proj, c_attn, embed_dim: hidden_size, kv_cache: None, use_cache: cfg.use_cache, kv_dim, head_dim, num_heads: cfg.num_attention_heads, multi_query: cfg.multi_query, }) } fn attn( &self, query: &Tensor, key: &Tensor, value: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { if query.dtype() != DType::F32 { // If we start supporting f16 models, we may need the upcasting scaling bits. 
// https://github.com/huggingface/transformers/blob/a0042379269bea9182c1f87e6b2eee4ba4c8cce8/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py#L133 candle::bail!("upcasting is not supported {:?}", query.dtype()) } let scale_factor = 1f64 / (self.head_dim as f64).sqrt(); let initial_query_shape = query.shape(); let key_len = key.dim(D::Minus1)?; let (query, key, attn_shape, attn_view) = if self.multi_query { let (b_sz, query_len, _) = query.dims3()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let attn_shape = (b_sz, query_len, self.num_heads, key_len); let attn_view = (b_sz, query_len * self.num_heads, key_len); (query, key.clone(), attn_shape, attn_view) } else { let (b_sz, _num_heads, query_len, _head_dim) = query.dims4()?; let query = query.reshape((b_sz, query_len * self.num_heads, self.head_dim))?; let key = key.reshape((b_sz * self.num_heads, self.head_dim, key_len))?; let attn_shape = (b_sz, self.num_heads, query_len, key_len); let attn_view = (b_sz * self.num_heads, query_len, key_len); (query, key, attn_shape, attn_view) }; let attn_weights = (query.matmul(&key.contiguous()?)? * scale_factor)?.reshape(attn_shape)?; let attention_mask = attention_mask.broadcast_as(attn_shape)?; let mask_value = Tensor::new(f32::NEG_INFINITY, query.device())?.broadcast_as(attn_shape)?; let attn_weights = attention_mask.where_cond(&attn_weights, &mask_value)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let value = value.contiguous()?; let attn_output = if self.multi_query { attn_weights .reshape(attn_view)? .matmul(&value)? .reshape(initial_query_shape)? } else { attn_weights.matmul(&value)? }; Ok(attn_output) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let qkv = self.c_attn.forward(hidden_states)?; let (query, key_value) = if self.multi_query { let query = qkv.i((.., .., ..self.embed_dim))?; let key_value = qkv.i((.., .., self.embed_dim..self.embed_dim + 2 * self.kv_dim))?; (query, key_value) } else { let mut dims = qkv.dims().to_vec(); dims.pop(); dims.push(self.embed_dim); dims.push(self.head_dim * 3); let qkv = qkv.reshape(dims)?.transpose(1, 2)?; let query = qkv.i((.., .., .., ..self.head_dim))?; let key_value = qkv.i((.., .., .., self.head_dim..3 * self.head_dim))?; (query, key_value) }; let mut key_value = key_value; if self.use_cache { if let Some(kv_cache) = &self.kv_cache { // TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for // arbitrarily large sizes. key_value = Tensor::cat(&[kv_cache, &key_value], D::Minus2)?.contiguous()?; } self.kv_cache = Some(key_value.clone()) } let key = key_value.narrow(D::Minus1, 0, self.head_dim)?; let value = key_value.narrow(D::Minus1, self.head_dim, self.head_dim)?; let attn_output = self.attn(&query, &key.t()?, &value, attention_mask)?; let attn_output = if self.multi_query { attn_output } else { attn_output .transpose(1, 2)? .reshape(hidden_states.shape())? 
}; let attn_output = self.c_proj.forward(&attn_output)?; Ok(attn_output) } } struct Mlp { c_fc: Linear, c_proj: Linear, } impl Mlp { fn load(inner_dim: usize, vb: VarBuilder, cfg: &Config) -> Result<Self> { let c_fc = linear(cfg.hidden_size, inner_dim, true, vb.pp("c_fc"))?; let c_proj = linear(inner_dim, cfg.hidden_size, true, vb.pp("c_proj"))?; Ok(Self { c_fc, c_proj }) } fn forward(&mut self, hidden_states: &Tensor) -> Result<Tensor> { let hidden_states = self.c_fc.forward(hidden_states)?.gelu()?; let hidden_states = self.c_proj.forward(&hidden_states)?; Ok(hidden_states) } } // TODO: Add cross-attention? struct Block { ln_1: LayerNorm, attn: Attention, ln_2: LayerNorm, mlp: Mlp, } impl Block { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let inner_dim = cfg.n_inner.unwrap_or(4 * hidden_size); let ln_1 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_1"))?; let attn = Attention::load(vb.pp("attn"), cfg)?; let ln_2 = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb.pp("ln_2"))?; let mlp = Mlp::load(inner_dim, vb.pp("mlp"), cfg)?; Ok(Self { ln_1, attn, ln_2, mlp, }) } fn forward(&mut self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let residual = hidden_states; let hidden_states = self.ln_1.forward(hidden_states)?; let attn_outputs = self.attn.forward(&hidden_states, attention_mask)?; let hidden_states = (&attn_outputs + residual)?; let residual = &hidden_states; let hidden_states = self.ln_2.forward(&hidden_states)?; let hidden_states = self.mlp.forward(&hidden_states)?; let hidden_states = (&hidden_states + residual)?; Ok(hidden_states) } } pub struct GPTBigCode { wte: Embedding, wpe: Embedding, blocks: Vec<Block>, ln_f: LayerNorm, lm_head: Linear, bias: Tensor, config: Config, } impl GPTBigCode { pub fn config(&self) -> &Config { &self.config } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let hidden_size = cfg.hidden_size; let vb_t = vb.pp("transformer"); let wte = embedding(cfg.vocab_size, hidden_size, vb_t.pp("wte"))?; let wpe = embedding(cfg.max_position_embeddings, hidden_size, vb_t.pp("wpe"))?; let blocks = (0..cfg.num_hidden_layers) .map(|i| Block::load(vb_t.pp(&format!("h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm(hidden_size, cfg.layer_norm_epsilon, vb_t.pp("ln_f"))?; let lm_head = linear(hidden_size, cfg.vocab_size, false, vb_t.pp("wte"))?; let bias = make_causal_mask(cfg.max_position_embeddings, vb.device())?; Ok(Self { wte, wpe, blocks, lm_head, ln_f, bias, config: cfg, }) } pub fn forward(&mut self, input_ids: &Tensor, past_len: usize) -> Result<Tensor> { let dev = input_ids.device(); let (b_sz, seq_len) = input_ids.dims2()?; let key_len = past_len + seq_len; let attention_mask = self.bias.i((past_len..key_len, ..key_len))?.unsqueeze(0)?; // MQA models: (batch_size, query_length, n_heads, key_length) // MHA models: (batch_size, n_heads, query_length, key_length) let seq_len_dim = if self.config.multi_query { 2 } else { 1 }; let attention_mask = attention_mask.unsqueeze(seq_len_dim)?; let position_ids = Tensor::arange(past_len as u32, (past_len + seq_len) as u32, dev)?; let position_ids = position_ids.unsqueeze(0)?.broadcast_as((b_sz, seq_len))?; let input_embeds = self.wte.forward(input_ids)?; let position_embeds = self.wpe.forward(&position_ids)?; let mut hidden_states = (&input_embeds + &position_embeds)?; for block in self.blocks.iter_mut() { hidden_states = block.forward(&hidden_states, &attention_mask)?; } let hidden_states = 
self.ln_f.forward(&hidden_states)?;
        let hidden_states = hidden_states
            .reshape((b_sz, seq_len, self.config.hidden_size))?
            .narrow(1, seq_len - 1, 1)?;
        let logits = self.lm_head.forward(&hidden_states)?.squeeze(1)?;
        Ok(logits)
    }
}
candle/candle-transformers/src/models/bigcode.rs/0
{ "file_path": "candle/candle-transformers/src/models/bigcode.rs", "repo_id": "candle", "token_count": 6280 }
35
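`GPTBigCode::forward` takes the freshly fed tokens together with `past_len`, the number of positions already held in the per-layer KV caches, and returns logits for the last position only. A decoding loop therefore feeds the whole prompt once and then a single token per step. The sketch below uses greedy argmax sampling and elides model/tokenizer loading; it is an illustration, not the example binary shipped with candle.

```rust
use candle::{Device, Tensor};
use candle_transformers::models::bigcode::GPTBigCode;

// Greedy decoding sketch: the first pass consumes the prompt, later passes
// only push the newest token while `past_len` tracks the KV-cache length.
fn generate(
    model: &mut GPTBigCode,
    prompt: &[u32],
    steps: usize,
    dev: &Device,
) -> candle::Result<Vec<u32>> {
    let mut tokens = prompt.to_vec();
    let mut past_len = 0;
    for _ in 0..steps {
        let ctx = &tokens[past_len..];
        let input = Tensor::new(ctx, dev)?.unsqueeze(0)?; // (1, ctx_len)
        let logits = model.forward(&input, past_len)?; // (1, vocab_size)
        past_len = tokens.len();
        let next = logits.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
        tokens.push(next);
    }
    Ok(tokens)
}
```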
use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use super::llama2_c::Config; pub struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } impl Config { pub fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } impl TransformerWeights { pub fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } pub fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'static>> { // TODO: As of 2023-08-04, gemm is slower than expected when multiplying a matrix of // size (1, k) with the transpose of a matrix of size (k, n) as it ends up transposing the // second matrix back. We detect this case here and as a temporary hack make the weight // matrix column major rather than row major. This ends up speeding up text generation from // 120 token/s to 220 token/s on a Ryzen 2600X. 
let tr = device.is_cpu() && !candle::utils::has_mkl(); let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) }; let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", tr(self.token_embedding_table.clone())?); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), tr(self.wq.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), tr(self.wk.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), tr(self.wv.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), tr(self.wo.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), tr(self.w1.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), tr(self.w2.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), tr(self.w3.i(layer)?)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } }
candle/candle-transformers/src/models/llama2_c_weights.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c_weights.rs", "repo_id": "candle", "token_count": 3322 }
36
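The `llama2_c_weights.rs` file above reads a karpathy llama2.c checkpoint: a header of seven little-endian i32s (dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len) followed by raw f32 tensors, and repackages everything as a `VarBuilder`. Below is a minimal sketch of how the loader might be driven end to end; the checkpoint path and the use of the `anyhow` crate are assumptions, and the final model construction is only hinted at in a comment since its signature is not part of the file above.

```rust
use candle::Device;
use candle_transformers::models::llama2_c::Config;
use candle_transformers::models::llama2_c_weights::TransformerWeights;

fn main() -> anyhow::Result<()> {
    let device = Device::Cpu;
    // Any llama2.c `.bin` export follows the same layout; the file name here is an assumption.
    let mut reader = std::io::BufReader::new(std::fs::File::open("stories15M.bin")?);
    // Header: dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len.
    let config = Config::from_reader(&mut reader)?;
    // The remaining bytes are the f32 weights, read in the order expected by `from_reader`.
    let weights = TransformerWeights::from_reader(&mut reader, &config, &device)?;
    // The HashMap packing and the column-major CPU trick happen inside `var_builder`.
    let vb = weights.var_builder(&config, &device)?;
    println!("head size: {}", config.head_size());
    // `vb` can now be handed to the llama2_c model loader (not shown in this file).
    let _ = vb;
    Ok(())
}
```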
use crate::quantized_nn::{linear_b, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use crate::models::metavoice::repeat_interleave; use candle::{Module, Result, Tensor, D}; pub mod transformer { use super::*; type Config = crate::models::metavoice::transformer::Config; #[derive(Debug, Clone)] struct FeedForward { w1: Linear, w2: Linear, w3: Linear, span: tracing::Span, } impl FeedForward { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let i_size = cfg.intermediate_size(); let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?; let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?; let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?; Ok(Self { w1, w2, w3, span: tracing::span!(tracing::Level::TRACE, "feed-forward"), }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?; swiglu.apply(&self.w2) } } #[derive(Debug, Clone)] struct Attention { wqkv: Linear, wo: Linear, dim: usize, kv_size: usize, n_local_heads: usize, head_dim: usize, n_head: usize, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_local_heads = cfg.n_local_heads(); let head_dim = cfg.head_dim(); let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim; let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?; let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?; Ok(Self { wqkv, wo, dim: cfg.dim, kv_size: n_local_heads * head_dim, n_local_heads, head_dim, n_head: cfg.n_head, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seqlen, _) = xs.dims3()?; let qkv = xs.apply(&self.wqkv)?; let q = qkv.narrow(D::Minus1, 0, self.dim)?; let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?; let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?; let q = q .reshape((b_sz, seqlen, self.n_head, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 2)?; let v = Tensor::cat(&[prev_v, &v], 2)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?; let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?; let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?; let attn_weights = attn_weights.broadcast_add(mask)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; attn_output .transpose(1, 2)? .reshape((b_sz, seqlen, self.dim))? 
.apply(&self.wo) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Block { attention: Attention, feed_forward: FeedForward, ffn_norm: RmsNorm, attention_norm: RmsNorm, span: tracing::Span, } impl Block { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new(cfg, vb.pp("attention"))?; let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?; let ffn_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?; let attention_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?; Ok(Self { attention, feed_forward, ffn_norm, attention_norm, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hs = xs.apply(&self.attention_norm)?; let hs = (xs + self.attention.forward(&hs, pos, mask))?; &hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward) } fn clear_kv_cache(&mut self) { self.attention.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { tok_embeddings: Embedding, pos_embeddings: Embedding, speaker_cond_pos: Linear, layers: Vec<Block>, norm: RmsNorm, output: Linear, spk_cond_mask: Tensor, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let tok_embeddings = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?; let pos_embeddings = Embedding::new(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?; let speaker_cond_pos = linear_b( cfg.speaker_emb_dim, cfg.dim, false, vb.pp("speaker_cond_pos"), )?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = Block::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("norm"))?; let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?; let spk_cond_mask = Tensor::cat( &[ Tensor::ones((1, 1, cfg.dim), candle::DType::F32, vb.device())?, Tensor::zeros((1, 1, cfg.dim), candle::DType::F32, vb.device())?, ], 0, )?; Ok(Self { tok_embeddings, pos_embeddings, speaker_cond_pos, layers, norm, output, spk_cond_mask, span: tracing::span!(tracing::Level::TRACE, "qtransformer"), }) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_sz, seqlen) = xs.dims2()?; let mask: Vec<_> = (0..seqlen) .flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?; let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?; let tok_embeddings = xs.apply(&self.tok_embeddings)?; let pos_embeddings = input_pos.apply(&self.pos_embeddings)?; let mut xs = tok_embeddings .broadcast_add(&pos_embeddings)? .broadcast_add( &spk_emb .apply(&self.speaker_cond_pos)? .broadcast_mul(&self.spk_cond_mask)?, )?; let mask = mask.to_dtype(xs.dtype())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, pos, &mask)? } xs.narrow(1, seqlen - 1, 1)? .apply(&self.norm)? .apply(&self.output) } } }
candle/candle-transformers/src/models/quantized_metavoice.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_metavoice.rs", "repo_id": "candle", "token_count": 5029 }
37
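In `Model::forward` of the quantized MetaVoice transformer above, a causal mask of shape `(1, 1, seqlen, seqlen)` is rebuilt on every call and broadcast-added to the attention scores before the softmax. The sketch below isolates just that mask construction so the pattern can be checked on its own; it only relies on candle ops that already appear in the file above.

```rust
use candle::{Device, Result, Tensor};

/// Additive causal mask: position i may attend to positions j <= i,
/// every other entry is -inf so it vanishes after the softmax.
fn causal_mask(seqlen: usize, device: &Device) -> Result<Tensor> {
    let mask: Vec<f32> = (0..seqlen)
        .flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
        .collect();
    Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), device)
}

fn main() -> Result<()> {
    let mask = causal_mask(4, &Device::Cpu)?;
    assert_eq!(mask.dims(), &[1, 1, 4, 4]);
    println!("{mask}");
    Ok(())
}
```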
pub use crate::models::with_tracing::Linear; use candle::{Result, Tensor}; use candle_nn::{Module, VarBuilder}; pub mod image_encoder; pub mod mask_decoder; pub mod prompt_encoder; pub mod sam; pub mod tiny_vit; pub mod transformer; pub fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { crate::models::with_tracing::linear(in_dim, out_dim, vb) } else { crate::models::with_tracing::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] pub struct LayerNorm2d { weight: Tensor, bias: Tensor, num_channels: usize, eps: f64, } impl LayerNorm2d { pub fn new(num_channels: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(num_channels, "weight")?; let bias = vb.get(num_channels, "bias")?; Ok(Self { weight, bias, num_channels, eps, }) } } impl Module for LayerNorm2d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let u = xs.mean_keepdim(1)?; let xs = xs.broadcast_sub(&u)?; let s = xs.sqr()?.mean_keepdim(1)?; let xs = xs.broadcast_div(&(s + self.eps)?.sqrt()?)?; xs.broadcast_mul(&self.weight.reshape((1, self.num_channels, 1, 1))?)? .broadcast_add(&self.bias.reshape((1, self.num_channels, 1, 1))?) } } #[derive(Debug)] pub struct MlpBlock { lin1: Linear, lin2: Linear, activation: candle_nn::Activation, span: tracing::Span, } impl MlpBlock { pub fn new( embedding_dim: usize, mlp_dim: usize, activation: candle_nn::Activation, vb: VarBuilder, ) -> Result<Self> { let lin1 = linear(vb.pp("lin1"), embedding_dim, mlp_dim, true)?; let lin2 = linear(vb.pp("lin2"), mlp_dim, embedding_dim, true)?; let span = tracing::span!(tracing::Level::TRACE, "mlp-block"); Ok(Self { lin1, lin2, activation, span, }) } } impl Module for MlpBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.lin1)? .apply(&self.activation)? .apply(&self.lin2) } }
candle/candle-transformers/src/models/segment_anything/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/mod.rs", "repo_id": "candle", "token_count": 1119 }
38
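`LayerNorm2d` above normalizes an NCHW tensor over its channel dimension (dim 1) and then applies a per-channel scale and shift. The sketch below wires it up with a zero-initialized `VarBuilder`, so no SAM checkpoint is needed; with zero weights the output is all zeros, which makes this a shape/plumbing check rather than a numerical one.

```rust
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::segment_anything::LayerNorm2d;

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // Zero-initialized weights: enough to exercise shapes without downloading real weights.
    let vb = VarBuilder::zeros(DType::F32, &dev);
    let ln = LayerNorm2d::new(8, 1e-6, vb)?;
    let xs = Tensor::randn(0f32, 1f32, (1, 8, 16, 16), &dev)?; // NCHW input
    let ys = ln.forward(&xs)?;
    assert_eq!(ys.dims(), xs.dims());
    Ok(())
}
```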
use candle::{Device, Result, Tensor}; pub fn linspace(start: f64, stop: f64, steps: usize) -> Result<Tensor> { if steps == 0 { Tensor::from_vec(Vec::<f64>::new(), steps, &Device::Cpu) } else if steps == 1 { Tensor::from_vec(vec![start], steps, &Device::Cpu) } else { let delta = (stop - start) / (steps - 1) as f64; let vs = (0..steps) .map(|step| start + step as f64 * delta) .collect::<Vec<_>>(); Tensor::from_vec(vs, steps, &Device::Cpu) } } /// A linear interpolator for a sorted array of x and y values. struct LinearInterpolator<'x, 'y> { xp: &'x [f64], fp: &'y [f64], cache: usize, } impl<'x, 'y> LinearInterpolator<'x, 'y> { fn accel_find(&mut self, x: f64) -> usize { let xidx = self.cache; if x < self.xp[xidx] { self.cache = self.xp[0..xidx].partition_point(|o| *o < x); self.cache = self.cache.saturating_sub(1); } else if x >= self.xp[xidx + 1] { self.cache = self.xp[xidx..self.xp.len()].partition_point(|o| *o < x) + xidx; self.cache = self.cache.saturating_sub(1); } self.cache } fn eval(&mut self, x: f64) -> f64 { if x < self.xp[0] || x > self.xp[self.xp.len() - 1] { return f64::NAN; } let idx = self.accel_find(x); let x_l = self.xp[idx]; let x_h = self.xp[idx + 1]; let y_l = self.fp[idx]; let y_h = self.fp[idx + 1]; let dx = x_h - x_l; if dx > 0.0 { y_l + (x - x_l) / dx * (y_h - y_l) } else { f64::NAN } } } pub fn interp(x: &[f64], xp: &[f64], fp: &[f64]) -> Vec<f64> { let mut interpolator = LinearInterpolator { xp, fp, cache: 0 }; x.iter().map(|&x| interpolator.eval(x)).collect() }
candle/candle-transformers/src/models/stable_diffusion/utils.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/utils.rs", "repo_id": "candle", "token_count": 979 }
39
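The two helpers above mirror numpy's `linspace` and `interp`: the first builds an evenly spaced grid as a CPU tensor, the second does piecewise-linear interpolation and returns NaN outside the sampled range. Here is a small usage sketch; the import path assumes the `utils` module is publicly exported from the stable-diffusion model tree, so adjust it if that is not the case in your candle version.

```rust
use candle_transformers::models::stable_diffusion::utils::{interp, linspace};

fn main() -> candle::Result<()> {
    // Five evenly spaced points on [0, 1]: 0.00, 0.25, 0.50, 0.75, 1.00.
    let grid = linspace(0.0, 1.0, 5)?;
    println!("{grid}");

    // Piecewise-linear resampling: y is known at x = 0.0, 0.5, 1.0.
    let xp = [0.0, 0.5, 1.0];
    let fp = [0.0, 0.25, 1.0];
    let ys = interp(&[0.25, 0.75], &xp, &fp);
    assert_eq!(ys, vec![0.125, 0.625]); // midpoints of the two linear segments
    // Queries outside [xp[0], xp[last]] return NaN rather than extrapolating.
    assert!(interp(&[2.0], &xp, &fp)[0].is_nan());
    Ok(())
}
```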
use super::common::{AttnBlock, GlobalResponseNorm, LayerNormNoWeights, TimestepBlock, WLayerNorm}; use candle::{DType, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] pub struct ResBlockStageB { depthwise: candle_nn::Conv2d, norm: WLayerNorm, channelwise_lin1: candle_nn::Linear, channelwise_grn: GlobalResponseNorm, channelwise_lin2: candle_nn::Linear, } impl ResBlockStageB { pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { groups: c, padding: ksize / 2, ..Default::default() }; let depthwise = candle_nn::conv2d(c, c, ksize, cfg, vb.pp("depthwise"))?; let norm = WLayerNorm::new(c)?; let channelwise_lin1 = candle_nn::linear(c + c_skip, c * 4, vb.pp("channelwise.0"))?; let channelwise_grn = GlobalResponseNorm::new(4 * c, vb.pp("channelwise.2"))?; let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?; Ok(Self { depthwise, norm, channelwise_lin1, channelwise_grn, channelwise_lin2, }) } pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> { let x_res = xs; let xs = xs.apply(&self.depthwise)?.apply(&self.norm)?; let xs = match x_skip { None => xs.clone(), Some(x_skip) => Tensor::cat(&[&xs, x_skip], 1)?, }; let xs = xs .permute((0, 2, 3, 1))? .contiguous()? .apply(&self.channelwise_lin1)? .gelu()? .apply(&self.channelwise_grn)? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_res } } #[derive(Debug)] struct SubBlock { res_block: ResBlockStageB, ts_block: TimestepBlock, attn_block: Option<AttnBlock>, } #[derive(Debug)] struct DownBlock { layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::Conv2d>, sub_blocks: Vec<SubBlock>, } #[derive(Debug)] struct UpBlock { sub_blocks: Vec<SubBlock>, layer_norm: Option<WLayerNorm>, conv: Option<candle_nn::ConvTranspose2d>, } #[derive(Debug)] pub struct WDiffNeXt { clip_mapper: candle_nn::Linear, effnet_mappers: Vec<Option<candle_nn::Conv2d>>, seq_norm: LayerNormNoWeights, embedding_conv: candle_nn::Conv2d, embedding_ln: WLayerNorm, down_blocks: Vec<DownBlock>, up_blocks: Vec<UpBlock>, clf_ln: WLayerNorm, clf_conv: candle_nn::Conv2d, c_r: usize, patch_size: usize, } impl WDiffNeXt { #[allow(clippy::too_many_arguments)] pub fn new( c_in: usize, c_out: usize, c_r: usize, c_cond: usize, clip_embd: usize, patch_size: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { const C_HIDDEN: [usize; 4] = [320, 640, 1280, 1280]; const BLOCKS: [usize; 4] = [4, 4, 14, 4]; const NHEAD: [usize; 4] = [1, 10, 20, 20]; const INJECT_EFFNET: [bool; 4] = [false, true, true, true]; const EFFNET_EMBD: usize = 16; let clip_mapper = candle_nn::linear(clip_embd, c_cond, vb.pp("clip_mapper"))?; let mut effnet_mappers = Vec::with_capacity(2 * INJECT_EFFNET.len()); let vb_e = vb.pp("effnet_mappers"); for (i, &inject) in INJECT_EFFNET.iter().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i), )?) } else { None }; effnet_mappers.push(c) } for (i, &inject) in INJECT_EFFNET.iter().rev().enumerate() { let c = if inject { Some(candle_nn::conv2d( EFFNET_EMBD, c_cond, 1, Default::default(), vb_e.pp(i + INJECT_EFFNET.len()), )?) 
} else { None }; effnet_mappers.push(c) } let seq_norm = LayerNormNoWeights::new(c_cond)?; let embedding_ln = WLayerNorm::new(C_HIDDEN[0])?; let embedding_conv = candle_nn::conv2d( c_in * patch_size * patch_size, C_HIDDEN[0], 1, Default::default(), vb.pp("embedding.1"), )?; let mut down_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate() { let vb = vb.pp("down_blocks").pp(i); let (layer_norm, conv, start_layer_i) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::Conv2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv2d(C_HIDDEN[i - 1], c_hidden, 2, cfg, vb.pp("0.1"))?; (Some(layer_norm), Some(conv), 1) } else { (None, None, 0) }; let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = start_layer_i; for _j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let res_block = ResBlockStageB::new(c_hidden, c_skip, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let down_block = DownBlock { layer_norm, conv, sub_blocks, }; down_blocks.push(down_block) } let mut up_blocks = Vec::with_capacity(C_HIDDEN.len()); for (i, &c_hidden) in C_HIDDEN.iter().enumerate().rev() { let vb = vb.pp("up_blocks").pp(C_HIDDEN.len() - 1 - i); let mut sub_blocks = Vec::with_capacity(BLOCKS[i]); let mut layer_i = 0; for j in 0..BLOCKS[i] { let c_skip = if INJECT_EFFNET[i] { c_cond } else { 0 }; let c_skip_res = if i < BLOCKS.len() - 1 && j == 0 { c_hidden + c_skip } else { c_skip }; let res_block = ResBlockStageB::new(c_hidden, c_skip_res, 3, vb.pp(layer_i))?; layer_i += 1; let ts_block = TimestepBlock::new(c_hidden, c_r, vb.pp(layer_i))?; layer_i += 1; let attn_block = if i == 0 { None } else { let attn_block = AttnBlock::new( c_hidden, c_cond, NHEAD[i], true, use_flash_attn, vb.pp(layer_i), )?; layer_i += 1; Some(attn_block) }; let sub_block = SubBlock { res_block, ts_block, attn_block, }; sub_blocks.push(sub_block) } let (layer_norm, conv) = if i > 0 { let layer_norm = WLayerNorm::new(C_HIDDEN[i - 1])?; let cfg = candle_nn::ConvTranspose2dConfig { stride: 2, ..Default::default() }; let conv = candle_nn::conv_transpose2d( c_hidden, C_HIDDEN[i - 1], 2, cfg, vb.pp(layer_i).pp(1), )?; (Some(layer_norm), Some(conv)) } else { (None, None) }; let up_block = UpBlock { layer_norm, conv, sub_blocks, }; up_blocks.push(up_block) } let clf_ln = WLayerNorm::new(C_HIDDEN[0])?; let clf_conv = candle_nn::conv2d( C_HIDDEN[0], 2 * c_out * patch_size * patch_size, 1, Default::default(), vb.pp("clf.1"), )?; Ok(Self { clip_mapper, effnet_mappers, seq_norm, embedding_conv, embedding_ln, down_blocks, up_blocks, clf_ln, clf_conv, c_r, patch_size, }) } fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> { const MAX_POSITIONS: usize = 10000; let r = (r * MAX_POSITIONS as f64)?; let half_dim = self.c_r / 2; let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64; let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)? .exp()?; let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?; let emb = if self.c_r % 2 == 1 { emb.pad_with_zeros(D::Minus1, 0, 1)? 
} else { emb }; emb.to_dtype(r.dtype()) } fn gen_c_embeddings(&self, clip: &Tensor) -> Result<Tensor> { clip.apply(&self.clip_mapper)?.apply(&self.seq_norm) } pub fn forward( &self, xs: &Tensor, r: &Tensor, effnet: &Tensor, clip: Option<&Tensor>, ) -> Result<Tensor> { const EPS: f64 = 1e-3; let r_embed = self.gen_r_embedding(r)?; let clip = match clip { None => None, Some(clip) => Some(self.gen_c_embeddings(clip)?), }; let x_in = xs; let mut xs = xs .apply(&|xs: &_| candle_nn::ops::pixel_unshuffle(xs, self.patch_size))? .apply(&self.embedding_conv)? .apply(&self.embedding_ln)?; let mut level_outputs = Vec::new(); for (i, down_block) in self.down_blocks.iter().enumerate() { if let Some(ln) = &down_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &down_block.conv { xs = xs.apply(conv)? } let skip = match &self.effnet_mappers[i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for block in down_block.sub_blocks.iter() { xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } level_outputs.push(xs.clone()) } level_outputs.reverse(); let mut xs = level_outputs[0].clone(); for (i, up_block) in self.up_blocks.iter().enumerate() { let effnet_c = match &self.effnet_mappers[self.down_blocks.len() + i] { None => None, Some(m) => { let effnet = effnet.interpolate2d(xs.dim(D::Minus2)?, xs.dim(D::Minus1)?)?; Some(m.forward(&effnet)?) } }; for (j, block) in up_block.sub_blocks.iter().enumerate() { let skip = if j == 0 && i > 0 { Some(&level_outputs[i]) } else { None }; let skip = match (skip, effnet_c.as_ref()) { (Some(skip), Some(effnet_c)) => Some(Tensor::cat(&[skip, effnet_c], 1)?), (None, Some(skip)) | (Some(skip), None) => Some(skip.clone()), (None, None) => None, }; xs = block.res_block.forward(&xs, skip.as_ref())?; xs = block.ts_block.forward(&xs, &r_embed)?; if let Some(attn_block) = &block.attn_block { xs = attn_block.forward(&xs, clip.as_ref().unwrap())?; } } if let Some(ln) = &up_block.layer_norm { xs = xs.apply(ln)? } if let Some(conv) = &up_block.conv { xs = xs.apply(conv)? } } let ab = xs .apply(&self.clf_ln)? .apply(&self.clf_conv)? .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, self.patch_size))? .chunk(2, 1)?; let b = ((candle_nn::ops::sigmoid(&ab[1])? * (1. - EPS * 2.))? + EPS)?; (x_in - &ab[0])? / b } }
candle/candle-transformers/src/models/wuerstchen/diffnext.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/diffnext.rs", "repo_id": "candle", "token_count": 8148 }
40
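`gen_r_embedding` above encodes the diffusion timestep with a standard sinusoidal scheme: `r` is scaled by 10000, multiplied by a geometric ladder of frequencies, and the sin and cos halves are concatenated (plus one zero column of padding when `c_r` is odd). The standalone sketch below reproduces the even-width case with the same candle ops, so the output shape and frequency layout can be inspected without instantiating the full WDiffNeXt network.

```rust
use candle::{DType, Device, Result, Tensor};

/// Sinusoidal timestep embedding for an even embedding width `c_r`,
/// mirroring `WDiffNeXt::gen_r_embedding` above.
fn r_embedding(r: &Tensor, c_r: usize) -> Result<Tensor> {
    const MAX_POSITIONS: usize = 10000;
    let r = (r * MAX_POSITIONS as f64)?;
    let half_dim = c_r / 2;
    let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64;
    let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)?
        .exp()?;
    let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?;
    Tensor::cat(&[emb.sin()?, emb.cos()?], 1)
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let r = Tensor::new(&[0.0f32, 0.5, 1.0], &dev)?; // a small batch of timesteps in [0, 1]
    let emb = r_embedding(&r, 64)?;
    assert_eq!(emb.dims(), &[3, 64]);
    Ok(())
}
```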
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Bert</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> import { hcl } from "https://cdn.skypack.dev/d3-color@3"; import { interpolateReds } from "https://cdn.skypack.dev/d3-scale-chromatic@3"; import { scaleLinear } from "https://cdn.skypack.dev/d3-scale@4"; import { getModelInfo, getEmbeddings, getWikiText, cosineSimilarity, } from "./utils.js"; const bertWorker = new Worker("./bertWorker.js", { type: "module", }); const inputContainerEL = document.querySelector("#input-container"); const textAreaEl = document.querySelector("#input-area"); const outputAreaEl = document.querySelector("#output-area"); const formEl = document.querySelector("#form"); const searchInputEl = document.querySelector("#search-input"); const formWikiEl = document.querySelector("#form-wiki"); const searchWikiEl = document.querySelector("#search-wiki"); const outputStatusEl = document.querySelector("#output-status"); const modelSelectEl = document.querySelector("#model"); const sentencesRegex = /(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z]\.)(?<=\.|\?)\s/gm; let sentenceEmbeddings = []; let currInputText = ""; let isCalculating = false; function toggleTextArea(state) { if (state) { textAreaEl.hidden = false; textAreaEl.focus(); } else { textAreaEl.hidden = true; } } inputContainerEL.addEventListener("focus", (e) => { toggleTextArea(true); }); textAreaEl.addEventListener("blur", (e) => { toggleTextArea(false); }); textAreaEl.addEventListener("focusout", (e) => { toggleTextArea(false); if (currInputText === textAreaEl.value || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); modelSelectEl.addEventListener("change", (e) => { if (currInputText === "" || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); function populateOutputArea(text) { currInputText = text; const sentences = text.split(sentencesRegex); outputAreaEl.innerHTML = ""; for (const [id, sentence] of sentences.entries()) { const sentenceEl = document.createElement("span"); sentenceEl.id = `sentence-${id}`; sentenceEl.innerText = sentence + " "; outputAreaEl.appendChild(sentenceEl); } } formEl.addEventListener("submit", async (e) => { e.preventDefault(); if (isCalculating || currInputText === "") return; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, search_prefix } = getModelInfo(modelID); const text = searchInputEl.value; const query = search_prefix + searchInputEl.value; outputStatusEl.classList.remove("invisible"); outputStatusEl.innerText = "Calculating embeddings for query..."; isCalculating = true; const out = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query] ); outputStatusEl.classList.add("invisible"); const queryEmbeddings = out.output[0]; // calculate cosine similarity with all sentences given the query const distances = sentenceEmbeddings .map((embedding, id) => ({ id, similarity: 
cosineSimilarity(queryEmbeddings, embedding), })) .sort((a, b) => b.similarity - a.similarity) // getting top 10 most similar sentences .slice(0, 10); const colorScale = scaleLinear() .domain([ distances[distances.length - 1].similarity, distances[0].similarity, ]) .range([0, 1]) .interpolate(() => interpolateReds); outputAreaEl.querySelectorAll("span").forEach((el) => { el.style.color = "unset"; el.style.backgroundColor = "unset"; }); distances.forEach((d) => { const el = outputAreaEl.querySelector(`#sentence-${d.id}`); const color = colorScale(d.similarity); const fontColor = hcl(color).l < 70 ? "white" : "black"; el.style.color = fontColor; el.style.backgroundColor = color; }); outputAreaEl .querySelector(`#sentence-${distances[0].id}`) .scrollIntoView({ behavior: "smooth", block: "center", inline: "nearest", }); isCalculating = false; toggleInputs(false); }); async function calculateEmbeddings(text) { isCalculating = true; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, document_prefix } = getModelInfo(modelID); const sentences = text.split(sentencesRegex); const allEmbeddings = []; outputStatusEl.classList.remove("invisible"); for (const [id, sentence] of sentences.entries()) { const query = document_prefix + sentence; outputStatusEl.innerText = `Calculating embeddings: sentence ${ id + 1 } of ${sentences.length}`; const embeddings = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query], updateStatus ); allEmbeddings.push(embeddings); } outputStatusEl.classList.add("invisible"); sentenceEmbeddings = allEmbeddings.map((e) => e.output[0]); isCalculating = false; toggleInputs(false); } function updateStatus(data) { if ("status" in data) { if (data.status === "loading") { outputStatusEl.innerText = data.message; outputStatusEl.classList.remove("invisible"); } } } function toggleInputs(state) { const interactive = document.querySelectorAll(".interactive"); interactive.forEach((el) => { if (state) { el.disabled = true; } else { el.disabled = false; } }); } searchWikiEl.addEventListener("input", () => { searchWikiEl.setCustomValidity(""); }); formWikiEl.addEventListener("submit", async (e) => { e.preventDefault(); if ("example" in e.submitter.dataset) { searchWikiEl.value = e.submitter.innerText; } const text = searchWikiEl.value; if (isCalculating || text === "") return; try { const wikiText = await getWikiText(text); searchWikiEl.setCustomValidity(""); textAreaEl.innerHTML = wikiText; populateOutputArea(wikiText); calculateEmbeddings(wikiText); searchWikiEl.value = ""; } catch { searchWikiEl.setCustomValidity("Invalid Wikipedia article name"); searchWikiEl.reportValidity(); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BERT</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Running sentence embeddings and similarity search in the browser using the Bert Model written with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> and compiled to Wasm. 
Embeddings models from are from <a href="https://huggingface.co/sentence-transformers/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Sentence Transformers </a> and <a href="https://huggingface.co/intfloat/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Liang Wang - e5 Models </a> </p> </div> <div> <label for="model" class="font-medium block">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" > <option value="intfloat_e5_small_v2" selected> intfloat/e5-small-v2 (133 MB) </option> <option value="intfloat_e5_base_v2"> intfloat/e5-base-v2 (438 MB) </option> <option value="intfloat_multilingual_e5_small"> intfloat/multilingual-e5-small (471 MB) </option> <option value="sentence_transformers_all_MiniLM_L6_v2"> sentence-transformers/all-MiniLM-L6-v2 (90.9 MB) </option> <option value="sentence_transformers_all_MiniLM_L12_v2"> sentence-transformers/all-MiniLM-L12-v2 (133 MB) </option> </select> </div> <div> <h3 class="font-medium">Examples:</h3> <form id="form-wiki" class="flex text-xs rounded-md justify-between w-min gap-3" > <input type="submit" hidden /> <button data-example class="disabled:cursor-not-allowed interactive"> Pizza </button> <button data-example class="disabled:cursor-not-allowed interactive"> Paris </button> <button data-example class="disabled:cursor-not-allowed interactive"> Physics </button> <input type="text" id="search-wiki" title="Search Wikipedia article by title" class="font-light py-0 mx-1 resize-none outline-none w-32 disabled:cursor-not-allowed interactive" placeholder="Load Wikipedia article..." /> <button title="Search Wikipedia article and load into input" class="bg-gray-700 hover:bg-gray-800 text-white font-normal px-2 py-1 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Load </button> </form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <input type="text" id="search-input" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none interactive disabled:cursor-not-allowed" placeholder="Search query here..." /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Search </button> </form> <div> <h3 class="font-medium">Input text:</h3> <div class="flex justify-between items-center"> <div class="rounded-md inline text-xs"> <span id="output-status" class="m-auto font-light invisible" >C</span > </div> </div> <div id="input-container" tabindex="0" class="min-h-[250px] bg-slate-100 text-gray-500 rounded-md p-4 flex flex-col gap-2 relative" > <textarea id="input-area" hidden value="" placeholder="Input text to perform semantic similarity search..." class="flex-1 resize-none outline-none left-0 right-0 top-0 bottom-0 m-4 absolute interactive disabled:invisible" ></textarea> <p id="output-area" class="grid-rows-2"> Input text to perform semantic similarity search... </p> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/bert/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/bert/lib-example.html", "repo_id": "candle", "token_count": 6066 }
41
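The search handler in the page above embeds the query and then ranks sentence embeddings by cosine similarity (`cosineSimilarity` lives in the demo's `utils.js`, which is not shown here). Since the rest of this repository is Rust, here is the same similarity computation sketched with candle tensors; the two vectors are toy stand-ins for real sentence embeddings.

```rust
use candle::{Device, Result, Tensor};

/// Cosine similarity between two 1-D embedding tensors.
fn cosine_similarity(a: &Tensor, b: &Tensor) -> Result<f32> {
    let dot = (a * b)?.sum_all()?.to_scalar::<f32>()?;
    let norm_a = a.sqr()?.sum_all()?.to_scalar::<f32>()?.sqrt();
    let norm_b = b.sqr()?.sum_all()?.to_scalar::<f32>()?.sqrt();
    Ok(dot / (norm_a * norm_b))
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let a = Tensor::new(&[1.0f32, 0.0, 1.0], &dev)?;
    let b = Tensor::new(&[1.0f32, 1.0, 0.0], &dev)?;
    println!("{}", cosine_similarity(&a, &b)?); // 0.5 for these toy vectors
    Ok(())
}
```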
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Llama.c Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const MODELS_BASE_URL = "https://huggingface.co/karpathy/tinyllamas/resolve/main"; // models base url const MODELS = { stories15M: { url: "stories15M.bin", seq_len: 256, }, stories42M: { url: "stories42M.bin", seq_len: 1024, }, stories110M: { url: "stories110M.bin", seq_len: 1024, }, }; const llamaWorker = new Worker("./llama2cWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = `${MODELS_BASE_URL}/${model.url}`; const prompt = getValue("prompt"); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = `<span class="font-semibold">${prompt}</span>${sentence.replace( /\<s\>|\<\/s\>/g, "" )}`; outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { llamaWorker.postMessage({ weightsURL, modelID, tokenizerURL: "tokenizer.json", prompt, temp: temperature, top_p: topP, repeatPenalty, seed: BigInt(seed), maxSeqLen, command: "start", }); const handleAbort = () => { llamaWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { llamaWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); llamaWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); let runController = new AbortController(); let isRunning = false; 
modelSelect.addEventListener("change", (e) => { const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = model.seq_len; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Llama2.c</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://github.com/karpathy/llama2.c" target="_blank" class="underline hover:text-blue-500 hover:no-underline" target="_blank" >Llama2.c</a > is Andrey Karpathy's C implementation of the Llama 2 LLM model in C. This demo uses <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> to run Llama2.c in the browser using rust/wasm. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> <option value="stories15M" selected>stories 15M (60.8 MB)</option> <option value="stories42M">stories 42M (167 MB)</option> <option value="stories110M">stories 110M (438 MB)</option> </select> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add your prompt here..." 
value="Once upon a time" /> <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40"> <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="256" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.40" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.40</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1 text-sm"></div> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/llama2-c/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/lib-example.html", "repo_id": "candle", "token_count": 6089 }
42
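The sliders in the page above (temperature, top-p, repeat penalty, seed) are forwarded to the Rust/WASM worker, which samples the next token from the model's logits. Below is a hedged sketch of that sampling step on the native side, assuming candle-transformers' `LogitsProcessor` and `apply_repeat_penalty` helpers; the logits here are toy values rather than real model output.

```rust
use candle::{Device, Result, Tensor};
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::utils::apply_repeat_penalty;

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // Toy logits over a 5-token vocabulary; a real run takes these from the model head.
    let logits = Tensor::new(&[0.1f32, 2.0, 0.3, 1.5, 0.0], &dev)?;
    // Seed, temperature and top-p mirror the defaults of the form above.
    let mut processor = LogitsProcessor::new(299792458, Some(0.4), Some(1.0));
    // Penalize tokens that already appeared in the generated context (ids are made up).
    let context: Vec<u32> = vec![1, 3];
    let logits = apply_repeat_penalty(&logits, 1.1, &context)?;
    let next_token = processor.sample(&logits)?;
    println!("sampled token id: {next_token}");
    Ok(())
}
```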
[package] name = "candle-wasm-example-sam" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } num-traits = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } getrandom = { version = "0.2", features = ["js"] } image = { workspace = true } log = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" wasm-bindgen = "0.2.87" serde-wasm-bindgen = "0.6.0"
candle/candle-wasm-examples/segment-anything/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/Cargo.toml", "repo_id": "candle", "token_count": 264 }
43
export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export const MODELS = { t5_small_quantized: { size: "64.4 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, t5_small: { size: "242 MB", base_url: "https://huggingface.co/t5-small/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_small: { size: "308 MB", base_url: "https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_base_quantized: { size: "263 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model-flan-t5-base.gguf", tokenizer: "tokenizer.json", config: "config-flan-t5-base.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, coedit_large_quantized: { size: "643 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: 
"model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; }
candle/candle-wasm-examples/t5/utils.js/0
{ "file_path": "candle/candle-wasm-examples/t5/utils.js", "repo_id": "candle", "token_count": 2339 }
44
[package] name = "candle-wasm-example-yolo" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } num-traits = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } image = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.64" features = [ 'Blob', 'CanvasRenderingContext2d', 'Document', 'Element', 'HtmlElement', 'HtmlCanvasElement', 'HtmlImageElement', 'ImageData', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', 'TextMetrics', ]
candle/candle-wasm-examples/yolo/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/yolo/Cargo.toml", "repo_id": "candle", "token_count": 463 }
45
pub fn add(left: usize, right: usize) -> usize { left + right } #[cfg(test)] mod tests { use super::*; #[test] fn it_works() { let result = add(2, 2); assert_eq!(result, 4); } }
candle/candle-wasm-tests/src/lib.rs/0
{ "file_path": "candle/candle-wasm-tests/src/lib.rs", "repo_id": "candle", "token_count": 108 }
46
## Privacy > Last updated: October 4, 2023 Users of HuggingChat are authenticated through their HF user account. By default, your conversations may be shared with the respective models' authors to improve their training data and model over time. Model authors are the custodians of the data collected by their model, even if it's hosted on our platform. If you disable data sharing in your settings, your conversations will not be used for any downstream usage (including for research or model training purposes), and they will only be stored to let you access past conversations. You can click on the Delete icon to delete any past conversation at any moment. 🗓 Please also consult huggingface.co's main privacy policy at <https://huggingface.co/privacy>. To exercise any of your legal privacy rights, please send an email to <[email protected]>. ## About available LLMs The goal of this app is to showcase that it is now possible to build an open source alternative to ChatGPT. 💪 For now (October 2023), it's running: - [Llama 2 70B](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) - [CodeLlama 35B](https://about.fb.com/news/2023/08/code-llama-ai-for-coding/) - [Falcon 180B](https://www.tii.ae/news/technology-innovation-institute-introduces-worlds-most-powerful-open-llm-falcon-180b) - [Mistral 7B](https://mistral.ai/news/announcing-mistral-7b/) ## Technical details This app is running in a [Space](https://huggingface.co/docs/hub/spaces-overview), which entails that the code for this UI is publicly visible [inside the Space repo](https://huggingface.co/spaces/huggingchat/chat-ui/tree/main). **Further development takes place on the [huggingface/chat-ui GitHub repo](https://github.com/huggingface/chat-ui).** The inference backend is running the optimized [text-generation-inference](https://github.com/huggingface/text-generation-inference) on HuggingFace's Inference API infrastructure. It is therefore possible to deploy a copy of this app to a Space and customize it (swap model, add some UI elements, or store user messages according to your own Terms and conditions). You can also 1-click deploy your own instance using the [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template). We welcome any feedback on this app: please participate to the public discussion at <https://huggingface.co/spaces/huggingchat/chat-ui/discussions> <a target="_blank" href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-discussion-xl.svg" title="open a discussion"></a>
chat-ui/PRIVACY.md/0
{ "file_path": "chat-ui/PRIVACY.md", "repo_id": "chat-ui", "token_count": 762 }
47
import type { EndpointParameters } from "./server/endpoints/endpoints"; import type { BackendModel } from "./server/models"; type buildPromptOptions = Pick<EndpointParameters, "messages" | "preprompt" | "continueMessage"> & { model: BackendModel; }; export async function buildPrompt({ messages, model, preprompt, continueMessage, }: buildPromptOptions): Promise<string> { const filteredMessages = messages.filter((m) => m.from !== "system"); let prompt = model .chatPromptRender({ messages: filteredMessages, preprompt }) // Not super precise, but it's truncated in the model's backend anyway .split(" ") .slice(-(model.parameters?.truncate ?? 0)) .join(" "); if (continueMessage && model.parameters?.stop) { prompt = model.parameters.stop.reduce((acc: string, curr: string) => { if (acc.endsWith(curr)) { return acc.slice(0, acc.length - curr.length); } return acc; }, prompt.trimEnd()); } return prompt; }
chat-ui/src/lib/buildPrompt.ts/0
{ "file_path": "chat-ui/src/lib/buildPrompt.ts", "repo_id": "chat-ui", "token_count": 327 }
48
<script lang="ts"> import CarbonCaretLeft from "~icons/carbon/caret-left"; import CarbonCaretRight from "~icons/carbon/caret-right"; export let href: string; export let direction: "next" | "previous"; export let isDisabled = false; </script> <a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 {isDisabled ? 'pointer-events-none opacity-50' : ''}" {href} > {#if direction === "previous"} <CarbonCaretLeft classNames="mr-1.5" /> Previous {:else} Next <CarbonCaretRight classNames="ml-1.5" /> {/if} </a>
chat-ui/src/lib/components/PaginationArrow.svelte/0
{ "file_path": "chat-ui/src/lib/components/PaginationArrow.svelte", "repo_id": "chat-ui", "token_count": 226 }
49
<script lang="ts"> import { onDestroy } from "svelte"; import CarbonImage from "~icons/carbon/image"; // import EosIconsLoading from "~icons/eos-icons/loading"; export let files: File[]; let file_error_message = ""; let errorTimeout: ReturnType<typeof setTimeout>; export let onDrag = false; async function dropHandle(event: DragEvent) { event.preventDefault(); if (event.dataTransfer && event.dataTransfer.items) { // Use DataTransferItemList interface to access the file(s) if (files.length > 0) { files = []; } // get only the first file // optionally: we need to handle multiple files, if we want to support document upload for example // for multimodal we only support one image at a time but we could support multiple PDFs if (event.dataTransfer.items[0].kind === "file") { const file = event.dataTransfer.items[0].getAsFile(); if (file) { if (!event.dataTransfer.items[0].type.startsWith("image")) { setErrorMsg("Only images are supported"); files = []; return; } // if image is bigger than 2MB abort if (file.size > 2 * 1024 * 1024) { setErrorMsg("Image is too big. (2MB max)"); files = []; return; } files = [file]; onDrag = false; } } } } function setErrorMsg(errorMsg: string) { if (errorTimeout) { clearTimeout(errorTimeout); } file_error_message = errorMsg; errorTimeout = setTimeout(() => { file_error_message = ""; onDrag = false; }, 2000); } onDestroy(() => { if (errorTimeout) { clearTimeout(errorTimeout); } }); </script> <div id="dropzone" role="form" on:drop={dropHandle} class="relative flex w-full max-w-4xl flex-col items-center rounded-xl border border-dashed bg-gray-100 focus-within:border-gray-300 dark:border-gray-500 dark:bg-gray-700 dark:focus-within:border-gray-500" > <div class="object-center"> {#if file_error_message} <div class="absolute bottom-0 left-0 right-0 top-0 flex flex-col items-center justify-center gap-2 rounded-xl bg-gray-100 bg-opacity-50 dark:bg-gray-700 dark:bg-opacity-50" > <p class="text-red-500 dark:text-red-400">{file_error_message}</p> <div class="h-2.5 w-1/2 rounded-full bg-gray-200 dark:bg-gray-700"> <div class="animate-progress-bar h-2.5 rounded-full bg-red-500 dark:text-red-400 " /> </div> </div> {/if} <div class="mt-3 flex justify-center" class:opacity-0={file_error_message}> <CarbonImage class="text-xl text-gray-500 dark:text-gray-400" /> </div> <p class="mb-3 mt-1.5 text-sm text-gray-500 dark:text-gray-400" class:opacity-0={file_error_message} > Drag and drop <span class="font-semibold">one image</span> here </p> </div> </div> <style> @keyframes slideInFromLeft { 0% { width: 0; } 100% { width: 100%; } } .animate-progress-bar { /* This section calls the slideInFromLeft animation we defined above */ animation: 2s linear 0s 1 slideInFromLeft; } </style>
chat-ui/src/lib/components/chat/FileDropzone.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/FileDropzone.svelte", "repo_id": "chat-ui", "token_count": 1232 }
50
import { Issuer, BaseClient, type UserinfoResponse, TokenSet, custom } from "openid-client";
import { addHours, addWeeks } from "date-fns";
import {
  COOKIE_NAME,
  OPENID_CLIENT_ID,
  OPENID_CLIENT_SECRET,
  OPENID_PROVIDER_URL,
  OPENID_SCOPES,
  OPENID_TOLERANCE,
  OPENID_RESOURCE,
  OPENID_CONFIG,
} from "$env/static/private";
import { sha256 } from "$lib/utils/sha256";
import { z } from "zod";
import { dev } from "$app/environment";
import type { Cookies } from "@sveltejs/kit";
import { collections } from "./database";
import JSON5 from "json5";

export interface OIDCSettings {
  redirectURI: string;
}

export interface OIDCUserInfo {
  token: TokenSet;
  userData: UserinfoResponse;
}

const stringWithDefault = (value: string) =>
  z
    .string()
    .default(value)
    .transform((el) => (el ? el : value));

const OIDConfig = z
  .object({
    CLIENT_ID: stringWithDefault(OPENID_CLIENT_ID),
    CLIENT_SECRET: stringWithDefault(OPENID_CLIENT_SECRET),
    PROVIDER_URL: stringWithDefault(OPENID_PROVIDER_URL),
    SCOPES: stringWithDefault(OPENID_SCOPES),
    TOLERANCE: stringWithDefault(OPENID_TOLERANCE),
    RESOURCE: stringWithDefault(OPENID_RESOURCE),
  })
  .parse(JSON5.parse(OPENID_CONFIG));

export const requiresUser = !!OIDConfig.CLIENT_ID && !!OIDConfig.CLIENT_SECRET;

export function refreshSessionCookie(cookies: Cookies, sessionId: string) {
  cookies.set(COOKIE_NAME, sessionId, {
    path: "/",
    // So that it works inside the space's iframe
    sameSite: dev ? "lax" : "none",
    secure: !dev,
    httpOnly: true,
    expires: addWeeks(new Date(), 2),
  });
}

export async function findUser(sessionId: string) {
  const session = await collections.sessions.findOne({ sessionId });

  if (!session) {
    return null;
  }

  return await collections.users.findOne({ _id: session.userId });
}

export const authCondition = (locals: App.Locals) => {
  return locals.user
    ? { userId: locals.user._id }
    : { sessionId: locals.sessionId, userId: { $exists: false } };
};

/**
 * Generates a CSRF token using the user's sessionId. Note that we don't need a secret because the sessionId is enough.
 */
export async function generateCsrfToken(sessionId: string, redirectUrl: string): Promise<string> {
  const data = {
    expiration: addHours(new Date(), 1).getTime(),
    redirectUrl,
  };

  return Buffer.from(
    JSON.stringify({
      data,
      signature: await sha256(JSON.stringify(data) + "##" + sessionId),
    })
  ).toString("base64");
}

async function getOIDCClient(settings: OIDCSettings): Promise<BaseClient> {
  const issuer = await Issuer.discover(OIDConfig.PROVIDER_URL);

  return new issuer.Client({
    client_id: OIDConfig.CLIENT_ID,
    client_secret: OIDConfig.CLIENT_SECRET,
    redirect_uris: [settings.redirectURI],
    response_types: ["code"],
    [custom.clock_tolerance]: OIDConfig.TOLERANCE || undefined,
  });
}

export async function getOIDCAuthorizationUrl(
  settings: OIDCSettings,
  params: { sessionId: string }
): Promise<string> {
  const client = await getOIDCClient(settings);
  const csrfToken = await generateCsrfToken(params.sessionId, settings.redirectURI);

  return client.authorizationUrl({
    scope: OIDConfig.SCOPES,
    state: csrfToken,
    resource: OIDConfig.RESOURCE || undefined,
  });
}

export async function getOIDCUserData(settings: OIDCSettings, code: string): Promise<OIDCUserInfo> {
  const client = await getOIDCClient(settings);
  const token = await client.callback(settings.redirectURI, { code });
  const userData = await client.userinfo(token);

  return { token, userData };
}

export async function validateAndParseCsrfToken(
  token: string,
  sessionId: string
): Promise<{
  /** This is the redirect url that was passed to the OIDC provider */
  redirectUrl: string;
} | null> {
  try {
    const { data, signature } = z
      .object({
        data: z.object({
          expiration: z.number().int(),
          redirectUrl: z.string().url(),
        }),
        signature: z.string().length(64),
      })
      .parse(JSON.parse(token));

    // Recompute the signature from the payload and the sessionId, then only trust
    // the redirect url if the signature matches and the token has not expired.
    const reconstructSign = await sha256(JSON.stringify(data) + "##" + sessionId);

    if (data.expiration > Date.now() && signature === reconstructSign) {
      return { redirectUrl: data.redirectUrl };
    }
  } catch (e) {
    console.error(e);
  }
  return null;
}
chat-ui/src/lib/server/auth.ts/0
{ "file_path": "chat-ui/src/lib/server/auth.ts", "repo_id": "chat-ui", "token_count": 1500 }
51
import { error } from "@sveltejs/kit"; import { collections } from "../database"; import type { Conversation } from "$lib/types/Conversation"; import type { SharedConversation } from "$lib/types/SharedConversation"; export async function downloadFile( sha256: string, convId: Conversation["_id"] | SharedConversation["_id"] ) { const fileId = collections.bucket.find({ filename: `${convId.toString()}-${sha256}` }); let mime = ""; const content = await fileId.next().then(async (file) => { if (!file) { throw error(404, "File not found"); } if (file.metadata?.conversation !== convId.toString()) { throw error(403, "You don't have access to this file."); } mime = file.metadata?.mime; const fileStream = collections.bucket.openDownloadStream(file._id); const fileBuffer = await new Promise<Buffer>((resolve, reject) => { const chunks: Uint8Array[] = []; fileStream.on("data", (chunk) => chunks.push(chunk)); fileStream.on("error", reject); fileStream.on("end", () => resolve(Buffer.concat(chunks))); }); return fileBuffer; }); return { content, mime }; }
chat-ui/src/lib/server/files/downloadFile.ts/0
{ "file_path": "chat-ui/src/lib/server/files/downloadFile.ts", "repo_id": "chat-ui", "token_count": 383 }
52
import { base } from "$app/paths"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { share } from "./utils/share"; import { page } from "$app/stores"; import { get } from "svelte/store"; import { getShareUrl } from "./utils/getShareUrl"; export async function shareConversation(id: string, title: string) { try { if (id.length === 7) { const url = get(page).url; await share(getShareUrl(url, id), title); } else { const res = await fetch(`${base}/conversation/${id}/share`, { method: "POST", headers: { "Content-Type": "application/json", }, }); if (!res.ok) { error.set("Error while sharing conversation, try again."); console.error("Error while sharing conversation: " + (await res.text())); return; } const { url } = await res.json(); await share(url, title); } } catch (err) { error.set(ERROR_MESSAGES.default); console.error(err); } }
chat-ui/src/lib/shareConversation.ts/0
{ "file_path": "chat-ui/src/lib/shareConversation.ts", "repo_id": "chat-ui", "token_count": 359 }
53
import type { WebSearchSource } from "./WebSearch";

export type FinalAnswer = {
  type: "finalAnswer";
  text: string;
};

export type TextStreamUpdate = {
  type: "stream";
  token: string;
};

export type AgentUpdate = {
  type: "agent";
  agent: string;
  content: string;
  binary?: Blob;
};

export type WebSearchUpdate = {
  type: "webSearch";
  messageType: "update" | "error" | "sources";
  message: string;
  args?: string[];
  sources?: WebSearchSource[];
};

export type StatusUpdate = {
  type: "status";
  status: "started" | "pending" | "finished" | "error" | "title";
  message?: string;
};

export type ErrorUpdate = {
  type: "error";
  message: string;
  name: string;
};

export type MessageUpdate =
  | FinalAnswer
  | TextStreamUpdate
  | AgentUpdate
  | WebSearchUpdate
  | StatusUpdate
  | ErrorUpdate;
chat-ui/src/lib/types/MessageUpdate.ts/0
{ "file_path": "chat-ui/src/lib/types/MessageUpdate.ts", "repo_id": "chat-ui", "token_count": 273 }
54
import { browser } from "$app/environment"; export function cookiesAreEnabled(): boolean { if (!browser) return false; if (navigator.cookieEnabled) return navigator.cookieEnabled; // Create cookie document.cookie = "cookietest=1"; const ret = document.cookie.indexOf("cookietest=") != -1; // Delete cookie document.cookie = "cookietest=1; expires=Thu, 01-Jan-1970 00:00:01 GMT"; return ret; }
chat-ui/src/lib/utils/cookiesAreEnabled.ts/0
{ "file_path": "chat-ui/src/lib/utils/cookiesAreEnabled.ts", "repo_id": "chat-ui", "token_count": 127 }
55
export async function share(url: string, title: string) {
  if (navigator.share) {
    // Use the native share sheet when available; ignore the rejection thrown
    // when the user dismisses it.
    await navigator.share({ url, title }).catch(() => {});
  } else {
    // Fall back to copying the URL to the clipboard.
    await navigator.clipboard.writeText(url);
  }
}
chat-ui/src/lib/utils/share.ts/0
{ "file_path": "chat-ui/src/lib/utils/share.ts", "repo_id": "chat-ui", "token_count": 63 }
56
<script lang="ts"> import { page } from "$app/stores"; </script> <div class="flex items-center justify-center bg-gradient-to-t from-gray-200 text-gray-800 dark:from-gray-700 dark:text-gray-300" > <div class="align-center -mt-24 flex flex-col justify-center rounded-xl border bg-white px-8 pb-2 pt-4 text-center dark:border-gray-700 dark:bg-gray-800" > <h1 class="mb-2 text-5xl font-semibold">{$page.status}</h1> <div class="-mx-8 my-2 h-px bg-gray-200 dark:bg-gray-700" /> <h2 class="max-w-sm text-lg">{$page.error?.message}</h2> </div> </div>
chat-ui/src/routes/+error.svelte/0
{ "file_path": "chat-ui/src/routes/+error.svelte", "repo_id": "chat-ui", "token_count": 241 }
57