from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
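# --- Editor's note (a minimal usage sketch, not part of the original file) ---
# With the lazy structure above, a submodule is only imported on first access;
# assuming `transformers` and `torch` are installed, the following triggers the
# import of `modeling_deberta` without any network access:
#
#     from transformers import DebertaConfig, DebertaModel
#
#     model = DebertaModel(DebertaConfig())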
def check_bouncy(n: int) -> bool:
    """Return True if `n` is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
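# --- Editor's note (not part of the original file) ---
# Quick sanity checks; the `solution` values follow from the Project Euler 112
# statement (least numbers where the bouncy proportion reaches 50% and 90%):
#
#     >>> check_bouncy(6789)    # strictly increasing digits -> not bouncy
#     False
#     >>> check_bouncy(155349)  # neither increasing nor decreasing
#     True
#     >>> solution(50)
#     538
#     >>> solution(90)
#     21780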
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified

        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
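# --- Editor's note (a minimal sketch, not part of the original file) ---
# Driving `generate_dummy_inputs` with a tokenizer, assuming `transformers`
# with Perceiver support and access to the checkpoint named in the archive map:
#
#     from transformers import PerceiverConfig, PerceiverTokenizer
#
#     onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#     print(sorted(dummy.keys()))  # expect ["attention_mask", "inputs"]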
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
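# --- Editor's note (a minimal sketch, not part of the original file) ---
# The integration test above reduces to this generation loop; it assumes
# TensorFlow and network access to the checkpoints named in the test:
#
#     from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
#     inputs = tokenizer(["and why is that?"], return_tensors="tf")
#     ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
#     print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)[0])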
def actual_power(a: int, b: int) -> int:
    """Compute a**b for non-negative b by recursive squaring."""
    if b == 0:
        return 1
    # int(b / 2) truncates toward zero, which also makes the recursion below
    # terminate when this helper is (indirectly) called with a negative b.
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
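# --- Editor's note (a minimal sketch, not part of the original file) ---
# The RoPE-scaling test above exercises the `rope_scaling` config knob; this
# assumes a transformers version that supports it (the small sizes mirror the
# tester defaults above):
#
#     from transformers import LlamaConfig, LlamaModel
#
#     config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
#                          num_attention_heads=4, intermediate_size=37,
#                          rope_scaling={"type": "linear", "factor": 2.0})
#     model = LlamaModel(config)  # positions past max_position_embeddings are interpolated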
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
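# --- Editor's note (not part of the original file) ---
# `is_punctuation` treats the four ASCII symbol ranges as punctuation, plus any
# character whose Unicode category starts with "P":
#
#     >>> is_punctuation(",")   # ord(",") == 44, inside 33..47
#     True
#     >>> is_punctuation("A")
#     False
#     >>> is_punctuation("、")  # Japanese comma, category "Po"
#     True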
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
UpperCamelCase__ : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
__a : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : Optional[str] = field(
default=lowerCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__a : bool = field(
default=lowerCamelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase_ :
__a : Optional[str] = field(default=lowerCamelCase_ , metadata={"help": "The input training data file (a text file)."} )
__a : Optional[str] = field(
default=lowerCamelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
__a : Optional[int] = field(
default=lowerCamelCase_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
__a : Optional[int] = field(
default=lowerCamelCase_ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : bool = field(
default=lowerCamelCase_ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def snake_case ( self ):
if self.train_file is not None:
SCREAMING_SNAKE_CASE_ : Any = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase_ :
__a : PreTrainedTokenizerBase
__a : Union[bool, str, PaddingStrategy] = True
__a : Optional[int] = None
__a : Optional[int] = None
def __call__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'label' if 'label' in features[0].keys() else 'labels'
SCREAMING_SNAKE_CASE_ : Any = [feature.pop(snake_case__ ) for feature in features]
SCREAMING_SNAKE_CASE_ : str = len(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = len(features[0]['input_ids'] )
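        # Flatten: expand every example into one feature dict per answer choice so the
        # tokenizer can pad all choices of the batch together.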
SCREAMING_SNAKE_CASE_ : int = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case__ )] for feature in features
]
SCREAMING_SNAKE_CASE_ : Optional[int] = list(chain(*snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.pad(
snake_case__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
SCREAMING_SNAKE_CASE_ : Dict = {k: v.view(snake_case__ ,snake_case__ ,-1 ) for k, v in batch.items()}
# Add back labels
        SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(snake_case__ ,dtype=torch.int64 )
return batch
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowerCamelCase_ , lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : str = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = data_args.validation_file
SCREAMING_SNAKE_CASE_ : Dict = data_args.train_file.split('.' )[-1]
SCREAMING_SNAKE_CASE_ : Dict = load_dataset(
lowerCamelCase_ , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE_ : Optional[Any] = [F'ending{i}' for i in range(4 )]
SCREAMING_SNAKE_CASE_ : List[str] = 'sent1'
SCREAMING_SNAKE_CASE_ : Any = 'sent2'
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE_ : int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
SCREAMING_SNAKE_CASE_ : Tuple = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
SCREAMING_SNAKE_CASE_ : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ : Tuple = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE_ : int = examples[question_header_name]
SCREAMING_SNAKE_CASE_ : Dict = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(lowerCamelCase_ )
]
# Flatten out
SCREAMING_SNAKE_CASE_ : Dict = list(chain(*lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : Tuple = list(chain(*lowerCamelCase_ ) )
# Tokenize
SCREAMING_SNAKE_CASE_ : int = tokenizer(
lowerCamelCase_ , lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
SCREAMING_SNAKE_CASE_ : Tuple = raw_datasets['train']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(lowerCamelCase_ ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE_ : int = train_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
SCREAMING_SNAKE_CASE_ : int = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
SCREAMING_SNAKE_CASE_ : Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ : List[str] = min(len(lowerCamelCase_ ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = eval_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = eval_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
SCREAMING_SNAKE_CASE_ : List[str] = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase_ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCamelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = eval_predictions
SCREAMING_SNAKE_CASE_ : Dict = np.argmax(lowerCamelCase_ , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ : Tuple = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ : Any = last_checkpoint
SCREAMING_SNAKE_CASE_ : str = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE_ : Any = train_result.metrics
SCREAMING_SNAKE_CASE_ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('train' , lowerCamelCase_ )
trainer.save_metrics('train' , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE_ : Tuple = trainer.evaluate()
SCREAMING_SNAKE_CASE_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics('eval' , lowerCamelCase_ )
trainer.save_metrics('eval' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
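# The wrapper below is the xla_spawn (TPU) entry point: it receives a process index and simply calls main().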
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
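# Example invocation (illustrative only; model, flags and output path are placeholders):
#   python run_swag.py --model_name_or_path bert-base-uncased --do_train --do_eval --output_dir /tmp/swag_out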
| 685 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
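    # Environment detection: Kaggle kernels expose KAGGLE_* variables, while Colab is
    # recognized through the IPython module loaded in the notebook runtime.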
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.01' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
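    # Debug helper: spawns CPU-only workers on a single machine (note accelerate_use_cpu='yes'
    # and the rendezvous file below) so distributed code paths can be exercised without GPUs.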
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
| 685 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
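# Example (illustrative): `transformers-cli env` runs the EnvironmentCommand registered above
# and prints environment details useful for bug reports.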
| 685 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase_ :
@staticmethod
def snake_case ( *snake_case__ ,**snake_case__ ):
pass
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCamelCase__ : List[str] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
__a : Any = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = pipeline(
'document-question-answering' ,model=snake_case__ ,tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = INVOICE_URL
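        # apply_tesseract OCRs the image and returns (words, normalized boxes); zipping
        # pairs each recognized word with its bounding box.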
SCREAMING_SNAKE_CASE_ : Optional[int] = list(zip(*apply_tesseract(load_image(snake_case__ ) ,snake_case__ ,'' ) ) )
SCREAMING_SNAKE_CASE_ : Tuple = 'What is the placebo?'
SCREAMING_SNAKE_CASE_ : List[Any] = [
{
'image': load_image(snake_case__ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = dqa_pipeline(snake_case__ ,top_k=2 )
self.assertEqual(
snake_case__ ,[
[
{'score': ANY(snake_case__ ), 'answer': ANY(snake_case__ ), 'start': ANY(snake_case__ ), 'end': ANY(snake_case__ )},
{'score': ANY(snake_case__ ), 'answer': ANY(snake_case__ ), 'start': ANY(snake_case__ ), 'end': ANY(snake_case__ )},
]
]
* 3 ,)
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline('document-question-answering' ,model='hf-internal-testing/tiny-random-layoutlmv2' )
SCREAMING_SNAKE_CASE_ : Dict = INVOICE_URL
SCREAMING_SNAKE_CASE_ : List[str] = 'How many cats are there?'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(nested_simplify(snake_case__ ,decimals=4 ) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(nested_simplify(snake_case__ ,decimals=4 ) ,snake_case__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
SCREAMING_SNAKE_CASE_ : Optional[int] = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE_ : Optional[int] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(snake_case__ ,[] )
# We can optionnally pass directly the words and bounding boxes
SCREAMING_SNAKE_CASE_ : List[str] = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : Tuple = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,words=snake_case__ ,boxes=snake_case__ ,top_k=2 )
self.assertEqual(snake_case__ ,[] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,)
SCREAMING_SNAKE_CASE_ : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE_ : Optional[int] = 'What is the invoice number?'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE_ : Dict = dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE_ : List[Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
[
{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 ,)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE_ : Tuple = INVOICE_URL
SCREAMING_SNAKE_CASE_ : Any = 'What is the invoice number?'
SCREAMING_SNAKE_CASE_ : Any = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE_ : int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
[
{'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=snake_case__ ,revision='3dc6de3' ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = INVOICE_URL
SCREAMING_SNAKE_CASE_ : Optional[int] = 'What is the invoice number?'
SCREAMING_SNAKE_CASE_ : Optional[int] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE_ : int = dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = list(zip(*apply_tesseract(load_image(snake_case__ ) ,snake_case__ ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE_ : Dict = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=snake_case__ ,revision='3dc6de3' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = INVOICE_URL
SCREAMING_SNAKE_CASE_ : str = 'What is the invoice number?'
SCREAMING_SNAKE_CASE_ : Optional[Any] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = list(zip(*apply_tesseract(load_image(snake_case__ ) ,snake_case__ ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE_ : int = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ ,decimals=4 ) ,[
{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
@slow
@require_torch
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = pipeline(
'document-question-answering' ,model='naver-clova-ix/donut-base-finetuned-docvqa' ,tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) ,feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' ,)
SCREAMING_SNAKE_CASE_ : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'What is the invoice number?'
SCREAMING_SNAKE_CASE_ : Optional[Any] = dqa_pipeline(image=snake_case__ ,question=snake_case__ ,top_k=2 )
self.assertEqual(nested_simplify(snake_case__ ,decimals=4 ) ,[{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def snake_case ( self ):
pass
| 685 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
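        # Toy merge rules matching the vocabulary above; the '#version' header line is part
        # of the BPE merges-file format and is skipped when the tokenizer loads it.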
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 685 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(lowerCamelCase_ , map_location='cpu' )
SCREAMING_SNAKE_CASE_ : List[str] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
SCREAMING_SNAKE_CASE_ : List[str] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
SCREAMING_SNAKE_CASE_ : List[str] = v
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = v
SCREAMING_SNAKE_CASE_ : int = chkpt['params']
SCREAMING_SNAKE_CASE_ : str = {n: v for n, v in config.items() if not isinstance(lowerCamelCase_ , (torch.FloatTensor, numpy.ndarray) )}
SCREAMING_SNAKE_CASE_ : Optional[Any] = chkpt['dico_word2id']
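    # Re-map the fairseq BPE vocab: tokens without an "@@" continuation marker (past the
    # first 14 special entries) get a "</w>" word-end suffix; otherwise the "@@" is stripped.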
SCREAMING_SNAKE_CASE_ : Optional[int] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
SCREAMING_SNAKE_CASE_ : Optional[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ : Any = pytorch_dump_folder_path + '/' + CONFIG_NAME
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase_ , indent=2 ) + '\n' )
    print(F'Save vocab file to {pytorch_vocab_dump_path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase_ , indent=2 ) + '\n' )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
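# Example (illustrative; script name and both paths are placeholders):
#   python convert_xlm_checkpoint.py --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./converted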
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 685 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "new-model"
if is_tf_available():
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = NewModelConfig
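# The stub above pairs the custom "new-model" config with a TF model class so the
# auto-class registration tests below have a concrete model to register.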
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = 'bert-base-cased'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'bert-base-cased'
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = TFAutoModelForPreTraining.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = TFAutoModelForCausalLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[str] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModelForMaskedLM.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = TFAutoModelForMaskedLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
            SCREAMING_SNAKE_CASE_ : Any = TFAutoModelForSeq2SeqLM.from_pretrained(snake_case__ )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFAutoModelForSeq2SeqLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForSequenceClassification.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ : str = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
@require_tensorflow_probability
def snake_case ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) ,14410 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) ,14410 )
def snake_case ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE_ : Any = ['FunnelBaseModel']
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFAutoModel.from_config(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
def snake_case ( self ):
try:
AutoConfig.register('new-model' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ ,snake_case__ )
auto_class.register(snake_case__ ,snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ ,snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE_ : List[Any] = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE_ : List[Any] = auto_class.from_config(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = auto_class.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('bert-base' )
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModel.from_pretrained(snake_case__ ,revision='aaaaaa' )
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,):
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def snake_case ( self ):
with self.assertRaisesRegex(snake_case__ ,'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def snake_case ( self ):
# Make sure we have cached the model.
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE_ : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE_ : Dict = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
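    # PyPI's JSON API lists every published release; sort them as version objects rather than strings.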
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def get_class_in_module ( class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class ( loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
    # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
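# Illustrative usage (not part of the original file; assumes the helper above
# is named `get_class_from_dynamic_module`, as in the upstream diffusers source):
#   pipeline_cls = get_class_from_dynamic_module(
#       'clip_guided_stable_diffusion' , 'clip_guided_stable_diffusion.py' )
# This fetches the community pipeline file via COMMUNITY_PIPELINES_URL and
# returns the single DiffusionPipeline subclass found by find_pipeline_class.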
| 685 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
    (r'''tf\.([\w\d]+)''', r'''datasets.Value('\1')'''),
    (r'''tfds\.features\.Text\(\)''', r'''datasets.Value('string')'''),
    (r'''tfds\.features\.Text\(''', r'''datasets.Value('string'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
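# Quick illustrative check (not part of the original file): applying the
# substitutions above, in order, to one line of TFDS code:
#   line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
#   for pattern, replacement in TO_CONVERT:
#       line = re.sub(pattern ,replacement ,line )
#   assert line == "features=datasets.Features({'text': datasets.Value('string')})"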
def convert_command_factory ( args : Namespace ):
    """simple docstring"""
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand ( parser ):
        train_parser = parser.add_parser(
'convert' ,help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' ,)
        train_parser.add_argument(
            '--tfds_path' ,type=str ,required=True ,help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' ,)
        train_parser.add_argument(
            '--datasets_directory' ,type=str ,required=True ,help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path ,datasets_directory ,*args ):
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run ( self ):
if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
else:
            file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}' )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
            with open(input_file ,encoding='utf-8' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
                    out_line = ''
continue
elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' ,'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    tfds_keywords = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(tfds_keywords ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                    match = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' ,out_line )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    out_line = 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dataset_name = f_name.replace('.py' ,'' )
                output_dir = os.path.join(abs_datasets_path ,dataset_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
self._logger.info(F'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
                utils_files.append(output_file )
if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,'w' ,encoding='utf-8' ) as f:
                f.writelines(out_lines )
self._logger.info(F'Converted in {output_file}' )
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' ,'' )]
                self._logger.info(F'Moving {utils_file} to {dest_folder}' )
                shutil.copy(utils_file ,dest_folder )
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig ( PretrainedConfig ):
    model_type = "visual_bert"
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,visual_embedding_dim=512 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,bypass_transformer=False ,special_visual_initialize=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
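# Illustrative use (not part of the original file): `VisualBertConfig()` builds
# a configuration with the defaults above, and keyword overrides work as usual,
# e.g. `VisualBertConfig(visual_embedding_dim=1024)` for larger visual features.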
| 685 | 1 |
def exchange_sort ( numbers : list[int] ) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
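    # e.g. entering "5, 2, 4, 1" prints [1, 2, 4, 5]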
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator ( iterations : int ) -> None:
    """simple docstring"""
    def is_in_circle(x : float , y : float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The known value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator ( iterations : int , function_to_integrate : Callable[[float], float] , min_value : float = 0.0 , max_value : float = 1.0 , ) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check ( iterations : int , min_value : float = 0.0 , max_value : float = 1.0 ) -> None:
    """simple docstring"""
    def identity_function(x : float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def pi_estimator_using_area_under_curve ( iterations : int ) -> None:
    """simple docstring"""
    def function_to_integrate(x : float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
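    # Note: sqrt(4 - x^2) on [0, 2] traces a quarter circle of radius 2, whose
    # area is pi * 2**2 / 4 = pi, hence the estimate is compared against pi below.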
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
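    # Illustrative extra runs (assuming the helper names above, which follow
    # TheAlgorithms `maths/monte_carlo.py`); small counts keep the demo fast.
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)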
| 685 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir : Path , src_lang : str , tgt_lang : str , model_name : str ):
    """simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [2_8.3, 2_7.5_2],
'wmt16-en-de-dist-6-1': [2_7.4, 2_7.1_1],
'wmt16-en-de-12-1': [2_6.9, 2_5.7_5],
}
    pair = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F'Generating {path}' )
    with open(path , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
# make sure we are under the root of the project
UpperCamelCase__ : List[str] = Path(__file__).resolve().parent.parent.parent
UpperCamelCase__ : Union[str, Any] = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase__ : Optional[Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,apply_ocr=True ,):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp ( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor ,'do_resize' ) )
        self.assertTrue(hasattr(image_processor ,'size' ) )
        self.assertTrue(hasattr(image_processor ,'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
    def test_call_pil ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
        self.assertIsInstance(encoding.words ,list )
        self.assertIsInstance(encoding.boxes ,list )
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
    def test_call_numpy ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
    def test_call_pytorch ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
    def test_LayoutLMv3_integration_test ( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
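        # Illustrative takeaway (not an original assertion): with apply_ocr=False
        # the processor returns pixel values only, so no `words`/`boxes` fields.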
| 685 | 1 |
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort ( start ,visited ,sort ):
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
# if all neighbors visited add current to sort
sort.append(lowerCamelCase_ )
# if all vertices haven't been visited select a new one to visit
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
for vertice in vertices:
if vertice not in visited:
            sort = topological_sort(vertice , visited , sort )
# return sort
return sort
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
print(sort)
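    # With the graph above this prints ['c', 'd', 'e', 'b', 'a']: every vertex
    # appears only after all of its descendants have been emitted.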
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample :
    guid : str
    text_a : str
    text_b : Optional[str] = None
    label : Optional[str] = None
    pairID : Optional[str] = None
@dataclass(frozen=True )
class InputFeatures :
    input_ids : List[int]
    attention_mask : Optional[List[int]] = None
    token_type_ids : Optional[List[int]] = None
    label : Optional[Union[int, float]] = None
    pairID : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset ( Dataset ):
    features : List[InputFeatures]
    def __init__( self ,data_dir ,tokenizer ,task ,max_seq_length = None ,overwrite_cache=False ,evaluate = False ,):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir ,'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(max_seq_length ) ,task ,) ,)
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(F'Loading features from cached file {cached_features_file}' )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(F'Creating features from dataset file at {data_dir}' )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info('Training examples: %s' ,len(examples ) )
                self.features = hans_convert_examples_to_features(examples ,label_list ,max_seq_length ,tokenizer )
                logger.info('Saving features into cached file %s' ,cached_features_file )
                torch.save(self.features ,cached_features_file )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset :
    features : List[InputFeatures]
    def __init__( self ,data_dir ,tokenizer ,task ,max_seq_length = 128 ,overwrite_cache=False ,evaluate = False ,):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples ,label_list ,max_seq_length ,tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class HansProcessor ( DataProcessor ):
    def get_train_examples ( self ,data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir ,'heuristics_train_set.txt' ) ) ,'train' )
    def get_dev_examples ( self ,data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
    def get_labels ( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples ( self ,lines ,set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid ,text_a=text_a ,text_b=text_b ,label=label ,pairID=pairID ) )
        return examples
def hans_convert_examples_to_features ( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> List[InputFeatures]:
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
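# Illustrative shape note (not part of the original file): each InputFeatures
# above carries `input_ids`/`attention_mask`/`token_type_ids` padded to
# `max_length`, an integer `label` in {0, 1, 2} (after the RoBERTa-style index
# swap in the dataset classes) and the HANS `pairID` used for evaluation.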
hans_tasks_num_labels = {
    '''hans''': 3,
}
hans_processors = {
    '''hans''': HansProcessor,
}
| 685 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig ( PretrainedConfig ):
    model_type = "roberta-prelayernorm"
    def __init__( self ,vocab_size=50265 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 685 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset ( dataset_size , input_in_memory_max_size , monkeypatch ):
"""simple docstring"""
if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
| 685 | 1 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort ( graph : dict[int, list[int]] , vert : int , visited : list[bool] ) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components ( reversed_graph : dict[int, list[int]] , vert : int , visited : list[bool] ) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components ( graph : dict[int, list[int]] ) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph : dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
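# Illustrative expectation (not part of the original file):
#   strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]
# since 0 -> 2 -> 1 -> 0 forms a cycle and 3 and 4 are singleton components.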
| 685 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K)
def builtin_voltage ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
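# Worked example (illustrative, not part of the original file): with
# donor_conc=1e17, acceptor_conc=1e14 and intrinsic_conc=1.41e10 (cm^-3) at
# T = 300 K, the built-in voltage is about 0.0259 * ln(5.03e10) ~= 0.64 V.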
| 685 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCamelCase__ : Optional[int] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig ( PretrainedConfig ):
    model_type = "albert"
    def __init__( self ,vocab_size=30000 ,embedding_size=128 ,hidden_size=4096 ,num_hidden_layers=12 ,num_hidden_groups=1 ,num_attention_heads=64 ,intermediate_size=16384 ,inner_group_num=1 ,hidden_act="gelu_new" ,hidden_dropout_prob=0 ,attention_probs_dropout_prob=0 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,classifier_dropout_prob=0.1 ,position_embedding_type="absolute" ,pad_token_id=0 ,bos_token_id=2 ,eos_token_id=3 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
    @property
    def inputs ( self ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 685 |
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ):
        self.queues = [
[],
[],
[],
]
    def enqueue ( self ,priority ,data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverflowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    def __init__( self ):
        self.queue = []
    def enqueue ( self ,data ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue ( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
return str(self.queue )
def fixed_priority_queue ( ) -> None:
"""simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
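    # Illustrative trace: priority 0 is drained first (10, 100, 128), then
    # priority 1 (70, 7, 64), then priority 2 (1, 5, 4); the tenth dequeue on
    # the now-empty structure raises UnderFlowError.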
def element_priority_queue ( ) -> None:
"""simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCamelCase__ : Dict = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__a : bool = field(default=lowerCamelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__a : bool = field(default=lowerCamelCase_ , metadata={"help": "whether to use adafactor"} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__a : Optional[float] = field(default=lowerCamelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__a : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
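# Usage sketch (assumes the standard transformers HfArgumentParser API; the
# dataclass above corresponds to the upstream Seq2SeqTrainingArguments, whose
# field names are referenced here as an assumption):
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(Seq2SeqTrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses()
#     print(training_args.label_smoothing, training_args.adafactor)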
| 685 |
def solution ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
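# O(1) alternative via inclusion-exclusion (a sketch; `solution` above remains
# the reference): add the arithmetic series of multiples of 3 and of 5, then
# subtract multiples of 15, which were counted twice.
def solution_closed_form ( n : int = 10_00 ) -> int:
    def series_sum ( k : int ) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2
    return series_sum(3 ) + series_sum(5 ) - series_sum(15 )
# solution_closed_form() == solution() == 233168 for the default bound.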
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[Any] = (PNDMScheduler,)
__a : Union[str, Any] = (("num_inference_steps", 50),)
def snake_case ( self ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**snake_case__ )
return config
def snake_case ( self ,snake_case__=0 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('num_inference_steps' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
SCREAMING_SNAKE_CASE_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = new_scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case ( self ):
pass
def snake_case ( self ,snake_case__=0 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('num_inference_steps' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : List[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = new_scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : int = scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Any = new_scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case ( self ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 10
SCREAMING_SNAKE_CASE_ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.prk_timesteps ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ).prev_sample
return sample
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('num_inference_steps' ,snake_case__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ ,'set_timesteps' ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ ,'set_timesteps' ):
SCREAMING_SNAKE_CASE_ : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE_ : List[str] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(snake_case__ ,0 ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step_prk(snake_case__ ,1 ,snake_case__ ,**snake_case__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_plms(snake_case__ ,0 ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_plms(snake_case__ ,1 ,snake_case__ ,**snake_case__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def snake_case ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**snake_case__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def snake_case ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ ,beta_end=snake_case__ )
def snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def snake_case ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=snake_case__ )
def snake_case ( self ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case__ )
def snake_case ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
SCREAMING_SNAKE_CASE_ : int = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ).prev_sample
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**snake_case__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.full_loop()
SCREAMING_SNAKE_CASE_ : List[str] = torch.sum(torch.abs(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.full_loop(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.sum(torch.abs(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE_ : Optional[int] = self.full_loop(set_alpha_to_one=snake_case__ ,beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(set_alpha_to_one=snake_case__ ,beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.sum(torch.abs(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Any = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
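# End-to-end sketch of the scheduler under test (assumes only the public
# diffusers API; the random tensors stand in for a trained UNet's noise
# predictions, so the output is meaningless but the control flow is real):
def pndm_denoising_sketch ():
    scheduler = PNDMScheduler(num_train_timesteps=1000 ,beta_schedule='linear' )
    scheduler.set_timesteps(50 )
    sample = torch.randn(1 ,3 ,32 ,32 )
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # placeholder prediction
        sample = scheduler.step(model_output ,t ,sample ).prev_sample
    return sample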
| 685 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
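# Simplified sketch of the mechanism the placeholders above rely on (an
# illustration, not the exact diffusers source): raise a helpful ImportError
# whenever a required backend such as flax cannot be imported.
import importlib.util
def requires_backends_sketch ( obj ,backends ):
    name = obj.__name__ if isinstance(obj ,type ) else type(obj ).__name__
    missing = [b for b in backends if importlib.util.find_spec(b ) is None]
    if missing:
        raise ImportError(F'{name} requires the {missing} backend(s), which could not be imported.' )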
| 685 | 1 |
import json
import os
import torch
from typing import Optional
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet ( hor : Optional[int] ) -> None:
"""simple docstring"""
    if hor == 1_28:
        down_block_types : List[Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels : int = (32, 1_28, 2_56)
        up_block_types : Union[str, Any] = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types : Tuple = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels : Tuple = (32, 64, 1_28, 2_56)
        up_block_types : Union[str, Any] = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model : Optional[int] = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
    state_dict : Optional[int] = model.state_dict()
    config : Dict = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function : List[Any] = UNet1DModel(**config )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
    with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f:
        json.dump(config , f )
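# The zip-based key mapping used in both converters assumes the two state
# dicts iterate in matching layer order. A defensive variant (a hypothetical
# helper, not in the original script) also checks tensor shapes first:
def remap_state_dict_checked ( src_sd ,dest_sd ):
    key_map = dict(zip(src_sd.keys() ,dest_sd.keys() ) )
    for src_key, dest_key in key_map.items():
        assert src_sd[src_key].shape == dest_sd[dest_key].shape, (src_key, dest_key)
    return {dest_key: src_sd[src_key] for src_key, dest_key in key_map.items()}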
def value_function ( ) -> None:
"""simple docstring"""
    config : Dict = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model : Optional[Any] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict : int = model
    hf_value_function : Optional[Any] = UNet1DModel(**config )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping : int = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 685 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 1 |
from typing import Any
import numpy as np
def is_hermitian ( lowerCamelCase_ : np.ndarray ) -> bool:
    """simple docstring"""
    return np.array_equal(lowerCamelCase_ , lowerCamelCase_.conjugate().T )
def rayleigh_quotient ( lowerCamelCase_ : np.ndarray , v : np.ndarray ) -> Any:
    """simple docstring"""
    v_star : int = v.conjugate().T
    v_star_dot : Any = v_star.dot(lowerCamelCase_ )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
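# Eigenvector sanity sketch (numpy only; an illustration added here): for an
# eigenvector v of a hermitian matrix, R(A, v) equals the matching eigenvalue.
def _eigenvalue_check ( ) -> None:
    a = np.array([[2.0, 0.0], [0.0, 5.0]] )
    eigenvalues, eigenvectors = np.linalg.eigh(a )
    v = eigenvectors[:, [1]]  # column eigenvector for the larger eigenvalue
    assert np.isclose(rayleigh_quotient(a , v ) ,eigenvalues[1] )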
def tests ( ) -> None:
    """simple docstring"""
    a : Union[str, Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    v : Union[str, Any] = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    print(rayleigh_quotient(a , v ) )
    a : str = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
    if not isinstance(lowerCamelCase_ , int ):
        raise TypeError('Input value must be an \'int\' type' )
    position : Tuple = 0
    number : int = lowerCamelCase_
    while number:
        position += 1
        number >>= 1
return position
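# Built-in equivalent (a note, not a behavior change): for non-negative ints,
# number.bit_length() returns the same most-significant-bit position, e.g.
# (37).bit_length() == 6 matches the loop above.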
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
def __UpperCAmelCase ( a : int , b : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
    a_binary : List[Any] = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary : List[Any] = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len : Optional[int] = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
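# Built-in equivalent (a note): bin(a ^ b) computes the same value, since ^ is
# Python's bitwise XOR; the function above additionally zero-pads both
# operands to the wider bit width before comparing digits.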
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
import qiskit
def quantum_entanglement ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    classical_bits : Dict = lowerCamelCase_
    # Using Aer's simulator
    simulator : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
    # Creating a Quantum Circuit acting on the q register
    circuit : str = qiskit.QuantumCircuit(lowerCamelCase_ , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , lowerCamelCase_ ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job : Tuple = qiskit.execute(circuit , simulator , shots=10_00 )
    return job.result().get_counts(circuit )
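# For the GHZ state prepared above, the measured counts concentrate on the
# all-zeros and all-ones bitstrings, e.g. roughly {'000': ~500, '111': ~500}
# over 10_00 shots with three qubits.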
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def snake_case ( self ,m ,inputs ,outputs ):
        has_not_submodules : str = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self ,snake_case__ ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def snake_case ( self ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
def __call__( self ,snake_case__ ):
        dest_traced : Union[str, Any] = Tracker(self.dest )(snake_case__ ).parametrized
        src_traced : int = Tracker(self.src )(snake_case__ ).parametrized
        src_traced : Tuple = list(filter(lambda x : type(x ) not in self.src_skip ,src_traced ) )
        dest_traced : Optional[Any] = list(filter(lambda x : type(x ) not in self.dest_skip ,dest_traced ) )
        if len(src_traced ) != len(dest_traced ):
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                F' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push ( name : str , lowerCamelCase_ : ResNetConfig , save_directory : Path , push_to_hub : bool = True ) -> None:
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model : str = timm.create_model(name , pretrained=True ).eval()
        our_model : Tuple = ResNetForImageClassification(lowerCamelCase_ ).eval()
        module_transfer : Optional[int] = ModuleTransfer(src=from_model , dest=our_model )
        x : Tuple = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name : Tuple = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor : Dict = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push ( save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    """simple docstring"""
    filename : int = 'imagenet-1k-id2label.json'
    num_labels : Tuple = 10_00
    expected_shape : List[str] = (1, num_labels)
    repo_id : Union[str, Any] = 'huggingface/label-files'
    num_labels : List[str] = num_labels
    id2label : int = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label : List[Any] = {int(k ): v for k, v in id2label.items()}
    id2label : Union[str, Any] = id2label
    label2id : Tuple = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig : Optional[int] = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config : int = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase__ : str = parser.parse_args()
UpperCamelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_attention_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_choices=4 ,):
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : int = seq_length
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
def snake_case ( self ):
        input_ids : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        attention_mask : Optional[int] = None
        if self.use_attention_mask:
            attention_mask : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        config : Optional[Any] = DistilBertConfig(
            vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=True ,)
return config, input_ids, attention_mask
def snake_case ( self ):
        config_and_inputs : Dict = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = FlaxDistilBertModelTester(self )
@slow
def snake_case ( self ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int = model_class_name.from_pretrained('distilbert-base-uncased' )
SCREAMING_SNAKE_CASE_ : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
SCREAMING_SNAKE_CASE_ : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (1, 11, 768)
self.assertEqual(output.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,snake_case__ ,atol=1E-4 ) )
| 685 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
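# How the lazy pattern works (a simplified illustration, not the actual
# transformers._LazyModule): the module object is swapped for a proxy whose
# __getattr__ imports the real submodule on first attribute access, e.g.:
#     class LazyNamespaceSketch:
#         def __init__(self, package, import_structure):
#             self._package = package
#             self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#         def __getattr__(self, attr):
#             import importlib
#             module = importlib.import_module(f'.{self._attr_to_module[attr]}', self._package)
#             return getattr(module, attr)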
| 685 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args ( ):
    """simple docstring"""
    parser : List[str] = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=lowerCamelCase_ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=lowerCamelCase_ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=lowerCamelCase_ )
return parser.parse_args()
def main ( ):
    """simple docstring"""
    args : str = parse_args()
    # Import training_script as a module.
    script_fpath : List[Any] = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name : Dict = script_fpath.stem
    mod : int = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
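# Example invocation (assuming this file is saved as xla_spawn.py and the
# training script defines an _mp_fn entry point):
#     python xla_spawn.py --num_cores 8 my_training_script.py --arg1 value1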
if __name__ == "__main__":
main()
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
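# Computing the half-power once restores the O(log b) bound (a sketch added
# for comparison; `actual_power` above recurses twice per level, making it
# O(b) overall):
def fast_power ( a : int , b : int ) -> float:
    if b < 0:
        return 1 / fast_power(a , -b )
    if b == 0:
        return 1
    half = fast_power(a , b // 2 )
    return half * half if b % 2 == 0 else a * half * half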
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Union[str, Any] = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester :
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
        config_and_inputs : Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )  # compare the logits slice against EXPECTED_SLICE, matching the sibling tests above
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also, this is going to be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase__ : List[str] = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : int = "rag"
__a : Any = True
def __init__( self ,snake_case__=None ,snake_case__=True ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=" / " ,snake_case__=" // " ,snake_case__=5 ,snake_case__=300 ,snake_case__=768 ,snake_case__=8 ,snake_case__="wiki_dpr" ,snake_case__="train" ,snake_case__="compressed" ,snake_case__=None ,snake_case__=None ,snake_case__=False ,snake_case__=False ,snake_case__=0.0 ,snake_case__=True ,snake_case__=False ,snake_case__=False ,snake_case__=False ,snake_case__=True ,snake_case__=None ,**snake_case__ ,):
super().__init__(
bos_token_id=snake_case__ ,pad_token_id=snake_case__ ,eos_token_id=snake_case__ ,decoder_start_token_id=snake_case__ ,forced_eos_token_id=snake_case__ ,is_encoder_decoder=snake_case__ ,prefix=snake_case__ ,vocab_size=snake_case__ ,**snake_case__ ,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('question_encoder' )
SCREAMING_SNAKE_CASE_ : List[str] = question_encoder_config.pop('model_type' )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('generator' )
SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE_ : str = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = reduce_loss
SCREAMING_SNAKE_CASE_ : List[str] = label_smoothing
SCREAMING_SNAKE_CASE_ : str = exclude_bos_score
SCREAMING_SNAKE_CASE_ : str = do_marginalize
SCREAMING_SNAKE_CASE_ : Dict = title_sep
SCREAMING_SNAKE_CASE_ : str = doc_sep
SCREAMING_SNAKE_CASE_ : Any = n_docs
SCREAMING_SNAKE_CASE_ : str = max_combined_length
SCREAMING_SNAKE_CASE_ : Tuple = dataset
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset_split
SCREAMING_SNAKE_CASE_ : List[Any] = index_name
SCREAMING_SNAKE_CASE_ : Optional[Any] = retrieval_vector_size
SCREAMING_SNAKE_CASE_ : Tuple = retrieval_batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = passages_path
SCREAMING_SNAKE_CASE_ : Dict = index_path
SCREAMING_SNAKE_CASE_ : Any = use_dummy_dataset
SCREAMING_SNAKE_CASE_ : List[Any] = output_retrieved
SCREAMING_SNAKE_CASE_ : Any = do_deduplication
SCREAMING_SNAKE_CASE_ : Dict = use_cache
if self.forced_eos_token_id is None:
SCREAMING_SNAKE_CASE_ : int = getattr(self.generator ,'forced_eos_token_id' ,snake_case__ )
@classmethod
def snake_case ( cls ,snake_case__ ,snake_case__ ,**snake_case__ ):
return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.question_encoder.to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = self.generator.to_dict()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.__class__.model_type
return output
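# Illustrative construction sketch (not part of the original file; the checkpoint
# ids are hypothetical and `snake_case` is the classmethod defined above that
# builds the config from a question-encoder config and a generator config):
#
# from transformers import AutoConfig
# q_cfg = AutoConfig.from_pretrained('some/question-encoder-checkpoint')
# g_cfg = AutoConfig.from_pretrained('some/generator-checkpoint')
# rag_cfg = lowerCAmelCase_.snake_case(q_cfg, g_cfg, n_docs=5)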
| 685 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
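# Illustrative vocab.txt layout implied by the parser above (the example tokens
# are made up): each line is either a single token or a comma-separated list of
# surface variants that share one id. The whole variant list is stored in
# ids_to_tokens, while vocab maps every individual variant to that same id.
#
# ねこ,ネコ,猫 -> all three variants map to id N
# <|endoftext|> -> a single-token line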
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
# self.vocab adds extra entries for Japanese character-variant fluctuation, so the vocabulary size is reported from self.raw_vocab instead
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<URL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.content_repattera.sub('<EMAIL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<TEL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.content_repattera.sub('<DATE>' ,snake_case__ )  # Western-style dates (first date regex above)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repattera.sub('<DATE>' ,snake_case__ )  # Japanese era dates (second date regex above)
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<PRICE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\u3000' ,'<SP>' )  # assumed ideographic (full-width) space; the literal character appears to have been normalized to an ASCII space
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE_ : int = text.replace(snake_case__ ,snake_case__ )
if clean:
SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
def check_simbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(e ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(e ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
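# Rough decoding of the byte-range checks above (explanatory comment only):
# check_simbol flags single characters whose 2-byte UTF-8 encoding lands in
# U+00A1-U+00BF, U+01C0-U+01C3, U+02B9-U+02FF or U+0300-U+0362 (Latin-1
# punctuation, click letters, modifier letters, combining marks); these are
# emitted as '<KIGOU>'. checkuae flags 3-byte characters in roughly
# U+2000-U+2BFF (general punctuation, symbols, arrows), emitted as '<U2000U2BFF>'.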
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda snake_case__ : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
| 685 | 1 |
from string import ascii_lowercase, ascii_uppercase
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> str:
"""simple docstring"""
if not sentence:
return ""
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(lowerCamelCase_ , lowerCamelCase_ ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 685 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cell initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variables to be set. We set the ones common to each
# process here (the others will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
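# Minimal usage sketch (illustrative only; the training function and its
# arguments are hypothetical, and __UpperCAmelCase is the launcher defined above):
#
# def training_loop(precision):
#     ...  # build the model, dataloaders and Accelerator inside the function
#
# __UpperCAmelCase(training_loop, ('fp16',), 2, 'fp16')  # 2 processes, fp16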
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be set. We set the ones common to each
# process here (the others will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
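# Illustrative call (the test function is hypothetical): __UpperCAmelCase(test_fn, (), 2)
# forks 2 CPU processes that rendezvous through the temporary file above, which is
# how distributed code paths can be exercised in unit tests without GPUs.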
| 685 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : int = "esm"
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=768 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=1026 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__="absolute" ,snake_case__=True ,snake_case__=None ,snake_case__=False ,snake_case__=False ,snake_case__=None ,snake_case__=None ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,mask_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE_ : Dict = use_cache
SCREAMING_SNAKE_CASE_ : Tuple = emb_layer_norm_before
SCREAMING_SNAKE_CASE_ : Optional[int] = token_dropout
SCREAMING_SNAKE_CASE_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = EsmFoldConfig()
elif isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = EsmFoldConfig(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
SCREAMING_SNAKE_CASE_ : str = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_list
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,'use_esm_attn_map' ,snake_case__ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = super().to_dict()
if isinstance(self.esmfold_config ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.esmfold_config.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__a : str = None
__a : bool = True
__a : bool = False
__a : bool = False
__a : bool = False
__a : float = 0
__a : bool = True
__a : bool = False
__a : int = 1_28
__a : "TrunkConfig" = None
def snake_case ( self ):
if self.trunk is None:
SCREAMING_SNAKE_CASE_ : Any = TrunkConfig()
elif isinstance(self.trunk ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TrunkConfig(**self.trunk )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = asdict(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.trunk.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__a : int = 48
__a : int = 10_24
__a : int = 1_28
__a : int = 32
__a : int = 32
__a : int = 32
__a : float = 0
__a : float = 0
__a : bool = False
__a : int = 4
__a : Optional[int] = 1_28
__a : "StructureModuleConfig" = None
def snake_case ( self ):
if self.structure_module is None:
SCREAMING_SNAKE_CASE_ : List[str] = StructureModuleConfig()
elif isinstance(self.structure_module ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )
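# Worked example of the head checks above, using the defaults declared in this
# class: sequence_state_dim=1024 with sequence_head_width=32 gives
# 1024 // 32 = 32 sequence heads and 32 * 32 == 1024, and pairwise_state_dim=128
# with pairwise_head_width=32 gives 4 pairwise heads, so validation passes.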
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = asdict(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.structure_module.to_dict()
return output
@dataclass
class lowerCAmelCase_ :
__a : int = 3_84
__a : int = 1_28
__a : int = 16
__a : int = 1_28
__a : int = 12
__a : int = 4
__a : int = 8
__a : float = 0.1
__a : int = 8
__a : int = 1
__a : int = 2
__a : int = 7
__a : int = 10
__a : float = 1E-8
__a : float = 1E5
def snake_case ( self ):
return asdict(self )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 685 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
SCREAMING_SNAKE_CASE_ : Dict = hex_num[0] == '-'
if is_negative:
SCREAMING_SNAKE_CASE_ : str = hex_num[1:]
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(lowerCamelCase_ , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
if int_num == 0:
# guard added: otherwise bin_str stays empty and int('') below raises ValueError
return 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''
while int_num > 0:
SCREAMING_SNAKE_CASE_ : int = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 685 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ = True ,snake_case__ = False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler
SCREAMING_SNAKE_CASE_ : Optional[Any] = optimizers if isinstance(snake_case__ ,(list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE_ : Optional[Any] = split_batches
SCREAMING_SNAKE_CASE_ : Any = step_with_optimizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GradientState()
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*snake_case__ ,**snake_case__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*snake_case__ ,**snake_case__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE_ : Dict = AcceleratorState().num_processes
for _ in range(snake_case__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler ,'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*snake_case__ ,**snake_case__ )
else:
self.scheduler.step(*snake_case__ ,**snake_case__ )
def snake_case ( self ):
return self.scheduler.get_last_lr()
def snake_case ( self ):
return self.scheduler.state_dict()
def snake_case ( self ,snake_case__ ):
self.scheduler.load_state_dict(snake_case__ )
def snake_case ( self ):
return self.scheduler.get_lr()
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.scheduler.print_lr(*snake_case__ ,**snake_case__ )
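# Minimal usage sketch (illustrative; assumes `optimizer` is an accelerate-prepared
# optimizer exposing `step_was_skipped`, and lowerCAmelCase_ is the wrapper above):
#
# base = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30)
# scheduler = lowerCAmelCase_(base, optimizer)
# scheduler.step()  # only advances once gradients have actually been synchronized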
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 685 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCamelCase__ : Any = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : List[str] = [file for file in os.listdir(snake_case__ ) if os.path.isfile(os.path.join(snake_case__ ,snake_case__ ) )]
if identifier is not None:
SCREAMING_SNAKE_CASE_ : int = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case__ ,snake_case__ ):
for n_ in n_identifier:
SCREAMING_SNAKE_CASE_ : Dict = [file for file in files if n_ not in file]
else:
SCREAMING_SNAKE_CASE_ : str = [file for file in files if n_identifier not in file]
SCREAMING_SNAKE_CASE_ : List[str] = ignore_files or []
ignore_files.append('__init__.py' )
SCREAMING_SNAKE_CASE_ : Any = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,snake_case__ )
if only_modules:
SCREAMING_SNAKE_CASE_ : Optional[Any] = file.split('.' )[0]
try:
SCREAMING_SNAKE_CASE_ : Dict = getattr(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = doctest.DocTestSuite(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = unittest.TextTestRunner().run(snake_case__ )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = Path('src/transformers' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'modeling'
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case__ ,identifier=snake_case__ ,ignore_files=snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = Path('src/transformers' )
SCREAMING_SNAKE_CASE_ : Tuple = 'tokenization'
self.analyze_directory(snake_case__ ,identifier=snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path('src/transformers' )
SCREAMING_SNAKE_CASE_ : Tuple = 'configuration'
self.analyze_directory(snake_case__ ,identifier=snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = Path('src/transformers' )
SCREAMING_SNAKE_CASE_ : Tuple = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case__ ,n_identifier=snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path('docs/source' )
SCREAMING_SNAKE_CASE_ : Optional[int] = ['favicon.ico']
self.analyze_directory(snake_case__ ,ignore_files=snake_case__ ,only_modules=snake_case__ )
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(R'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(R'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
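# Example of what the two patterns above capture (illustrative module content):
#
# import .pipeline_utils          -> captures 'pipeline_utils'
# from .schedulers import Foo     -> captures 'schedulers'
#
# so this function would return ['pipeline_utils', 'schedulers'] (deduplicated,
# order not guaranteed because of the intermediate set).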
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(R'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
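# Example: a module containing `import torch` in an environment without torch raises
# ImportError('... Run `pip install torch`') here, instead of failing later at load time.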
def get_class_in_module(class_name: Optional[str], module_path: Union[str, os.PathLike]):
    """Import a module placed in the dynamic-modules cache and return one of its classes."""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the single custom pipeline class inheriting from `DiffusionPipeline` in a module."""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.')
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
) -> str:
    """Download a module file from a local folder, the community pipelines on GitHub, or a Hub repo, and cache it."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = f'v{revision}'
        elif revision != 'main':
            raise ValueError(
                f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                f' {", ".join(available_versions + ["main"])}.')
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == 'local' or submodule == 'git':
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f'{module_needed}.py',
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in a local folder or a model repository."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
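# Illustrative usage (the repo id, file and class names below are hypothetical placeholders):
#   pipeline_cls = get_class_from_dynamic_module(
#       'some-user/some-repo', 'my_pipeline.py', class_name='MyPipeline'
#   )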
| 685 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def snake_case ( self ,snake_case__=0 ):
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor((1, 3, 128, 128) ,rng=random.Random(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.RandomState(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : str = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
# warmup pass to apply optimizations
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**self.get_dummy_inputs() )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : str = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : str = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
@property
def snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : List[str] = False
return options
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=snake_case__ ,feature_extractor=snake_case__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=snake_case__ ,image=snake_case__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=snake_case__ ,output_type='np' ,)
SCREAMING_SNAKE_CASE_ : str = output.images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ : Optional[int] = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ : str = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
SCREAMING_SNAKE_CASE_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=snake_case__ ,safety_checker=snake_case__ ,feature_extractor=snake_case__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ : str = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=snake_case__ ,image=snake_case__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=snake_case__ ,output_type='np' ,)
SCREAMING_SNAKE_CASE_ : Tuple = output.images
SCREAMING_SNAKE_CASE_ : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ : str = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "visual_bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 685 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 512,
}
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The unit circle (area pi) is inscribed in a 2 x 2 square (area 4),
    # so the ratio of the areas, circle to square, is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator against y = x, whose exact integral is (max^2 - min^2) / 2."""
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2], a quarter circle of radius 2."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
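# Example invocations (illustrative; iteration counts are arbitrary):
#   pi_estimator(100_000)                                           # prints an estimate near 3.1416
#   area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 3.0)  # ~9.0 (exact integral of x^2 over [0, 3] is 9)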
| 685 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCamelCase__ : str = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ : str = '''RegNetConfig'''
# Base docstring
UpperCamelCase__ : List[str] = '''facebook/regnet-y-040'''
UpperCamelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCamelCase__ : Dict = '''facebook/regnet-y-040'''
UpperCamelCase__ : List[str] = '''tabby, tabby cat'''
UpperCamelCase__ : Any = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ = 3 ,snake_case__ = 1 ,snake_case__ = 1 ,snake_case__ = "relu" ,**snake_case__ ,):
super().__init__(**snake_case__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE_ : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ : Any = tf.keras.layers.ConvaD(
filters=snake_case__ ,kernel_size=snake_case__ ,strides=snake_case__ ,padding='VALID' ,groups=snake_case__ ,use_bias=snake_case__ ,name='convolution' ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization' )
SCREAMING_SNAKE_CASE_ : int = ACTaFN[activation] if activation is not None else tf.identity
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.convolution(self.padding(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : int = self.normalization(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.activation(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.num_channels
SCREAMING_SNAKE_CASE_ : int = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,)
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = shape_list(snake_case__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE_ : str = tf.transpose(snake_case__ ,perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : Dict = self.embedder(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ = 2 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tf.keras.layers.ConvaD(
filters=snake_case__ ,kernel_size=1 ,strides=snake_case__ ,use_bias=snake_case__ ,name='convolution' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization' )
def snake_case ( self ,snake_case__ ,snake_case__ = False ):
return self.normalization(self.convolution(snake_case__ ) ,training=snake_case__ )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ ,name='pooler' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=snake_case__ ,kernel_size=1 ,activation='relu' ,name='attention.0' ),
tf.keras.layers.ConvaD(filters=snake_case__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2' ),
]
def snake_case ( self ,snake_case__ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pooler(snake_case__ )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE_ : List[Any] = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Tuple = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : Any = (
TFRegNetShortCut(snake_case__ ,stride=snake_case__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE_ : Any = [
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
snake_case__ ,stride=snake_case__ ,groups=snake_case__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=snake_case__ ,name='layer.2' ),
]
SCREAMING_SNAKE_CASE_ : Dict = ACTaFN[config.hidden_act]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Any = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Any = self.activation(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Tuple = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : List[str] = (
TFRegNetShortCut(snake_case__ ,stride=snake_case__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
SCREAMING_SNAKE_CASE_ : List[str] = [
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
snake_case__ ,stride=snake_case__ ,groups=snake_case__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetSELayer(snake_case__ ,reduced_channels=int(round(in_channels / 4 ) ) ,name='layer.2' ),
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=snake_case__ ,name='layer.3' ),
]
SCREAMING_SNAKE_CASE_ : int = ACTaFN[config.hidden_act]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Dict = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Tuple = self.activation(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 2 ,snake_case__ = 2 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ : str = [
# downsampling is done in the first layer with stride of 2
layer(snake_case__ ,snake_case__ ,snake_case__ ,stride=snake_case__ ,name='layers.0' ),
*[layer(snake_case__ ,snake_case__ ,snake_case__ ,name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def snake_case ( self ,snake_case__ ):
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_module(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,) )
SCREAMING_SNAKE_CASE_ : Any = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case__ ,snake_case__ ,snake_case__ ,depth=snake_case__ ,name=F'stages.{i+1}' ) )
def snake_case ( self ,snake_case__ ,snake_case__ = False ,snake_case__ = True ):
SCREAMING_SNAKE_CASE_ : Optional[int] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ : int = stage_module(snake_case__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ ,hidden_states=snake_case__ )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    config_class = RegNetConfig
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = config
SCREAMING_SNAKE_CASE_ : Any = TFRegNetEmbeddings(snake_case__ ,name='embedder' )
SCREAMING_SNAKE_CASE_ : Dict = TFRegNetEncoder(snake_case__ ,name='encoder' )
SCREAMING_SNAKE_CASE_ : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ ,name='pooler' )
@unpack_inputs
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Tuple = self.embedder(snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.encoder(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ : int = self.pooler(snake_case__ )
# Change to NCHW output format have uniformity in the modules
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ : Dict = tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple([tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case__ ,pooler_output=snake_case__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class lowerCAmelCase_ ( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def snake_case ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) ,dtype=tf.floataa )}
UpperCamelCase__ : Any = r'''
 This model is a Tensorflow
 [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
 regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
 behavior.
 Parameters:
 config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
 Initializing with a config file does not load the weights associated with the model, only the
 configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase__ : List[str] = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
 [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
super().__init__(snake_case__ ,*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFRegNetMainLayer(snake_case__ ,name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__=False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[Any] = self.regnet(
pixel_values=snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
def __init__( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
super().__init__(snake_case__ ,*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = config.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = TFRegNetMainLayer(snake_case__ ,name='regnet' )
# classification head
SCREAMING_SNAKE_CASE_ : List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def snake_case ( self ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__=False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : int = self.regnet(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ : List[str] = self.classifier[0](snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.classifier[1](snake_case__ )
SCREAMING_SNAKE_CASE_ : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ ,logits=snake_case__ )
if not return_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case__ ,logits=snake_case__ ,hidden_states=outputs.hidden_states )
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 1 |
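# Two small priority-queue variants follow (behavior summarized from the code below):
# - FixedPriorityQueue: one FIFO list per priority level; priority 0 is served first, 2 last.
# - ElementPriorityQueue: a single list where the smallest element is dequeued first.
# Both cap their size at 100 entries and raise on overflow/underflow.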
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        # One FIFO queue per priority level (0 is served first).
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')
    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')
    def __str__(self):
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)
    def dequeue(self):
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            # The smallest element has the highest priority.
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=True )
class lowerCAmelCase_ :
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class lowerCAmelCase_ :
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( Dataset ):
    features: List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[snake_case__]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(self.features )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[snake_case__]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example.guid}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
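# Hypothetical usage sketch (most names above are obfuscated; only
# hans_convert_examples_to_features, HansProcessor and processor.get_labels
# appear verbatim). Assuming a processor, a tokenizer, and a max length of 128:
#   processor = HansProcessor()
#   features = hans_convert_examples_to_features(
#       examples, processor.get_labels(), 128, tokenizer)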
| 685 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : str = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
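# The parametrisation above encodes the contract of is_small_dataset: a dataset
# counts as small only when both sizes are truthy and
# dataset_size < IN_MEMORY_MAX_SIZE; a max size of 0 disables in-memory loading.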
| 685 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
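# Worked form of the expression above (a sketch, not part of the original):
#   V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2)
# Dividing by physical_constants["electron volt"][0] converts joules to eV, so
# the returned value reads directly in volts. Hypothetical call, assuming the
# three positional arguments are (donor_conc, acceptor_conc, intrinsic_conc):
#   builtin_voltage(1e17, 1e17, 1e10)  # ~= 0.83 V at T = 300 K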
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ : Any = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
UpperCamelCase__ : List[str] = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : List[str] = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase_ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : Dict = [chr(lowerCamelCase_ ) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = set()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
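# Illustrative sketch: for the word tuple ('h', 'e', 'l', 'l', 'o') the helper
# above returns {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}; these symbol
# pairs drive the greedy merge loop in the BPE method below.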
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : int = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[str] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="replace" ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<s>" ,snake_case__="<unk>" ,snake_case__="<pad>" ,snake_case__="<mask>" ,snake_case__=False ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else bos_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else eos_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else cls_token
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else unk_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Any = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else mask_token
super().__init__(
errors=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,cls_token=snake_case__ ,pad_token=snake_case__ ,mask_token=snake_case__ ,add_prefix_space=snake_case__ ,**snake_case__ ,)
with open(snake_case__ ,encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Any = json.load(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : int = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : Any = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case__ ,encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ : str = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : List[str] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : int = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def snake_case ( self ):
return len(self.encoder )
def snake_case ( self ):
return dict(self.encoder ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = get_pairs(snake_case__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : List[str] = min(snake_case__ ,key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = bigram
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : Any = 0
while i < len(snake_case__ ):
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = word.index(snake_case__ ,snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = new_word
if len(snake_case__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : int = get_pairs(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ' '.join(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = word
return word
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = []
for token in re.findall(self.pat ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(' ' ) )
return bpe_tokens
def snake_case ( self ,snake_case__ ):
return self.encoder.get(snake_case__ ,self.encoder.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.decoder.get(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(snake_case__ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=snake_case__ ,ensure_ascii=snake_case__ ) + '\n' )
SCREAMING_SNAKE_CASE_ : Dict = 0
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ : str = token_index
writer.write(' '.join(snake_case__ ) + '\n' )
index += 1
return vocab_file, merge_file
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ ,token_ids_a=snake_case__ ,already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self ,snake_case__ ,snake_case__=False ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : List[Any] = ' ' + text
return (text, kwargs)
| 685 |
class OverFlowError ( Exception ):
pass
class UnderFlowError ( Exception ):
pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Dict = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 1 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
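# Expected shape of the result (a sketch, not a guaranteed exact split): an
# n-qubit GHZ state measures as all zeros or all ones, so with shots=10_00 the
# counts concentrate on '0' * n and '1' * n, roughly 500 each.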
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {'''vocab_file''': '''vocab.txt'''}
UpperCamelCase__ : Optional[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
UpperCamelCase__ : List[Any] = {
'''YituTech/conv-bert-base''': 5_12,
'''YituTech/conv-bert-medium-small''': 5_12,
'''YituTech/conv-bert-small''': 5_12,
}
UpperCamelCase__ : Any = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : int = VOCAB_FILES_NAMES
__a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__a : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Dict = ConvBertTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__=True ,snake_case__="[UNK]" ,snake_case__="[SEP]" ,snake_case__="[PAD]" ,snake_case__="[CLS]" ,snake_case__="[MASK]" ,snake_case__=True ,snake_case__=None ,**snake_case__ ,):
super().__init__(
snake_case__ ,tokenizer_file=snake_case__ ,do_lower_case=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,pad_token=snake_case__ ,cls_token=snake_case__ ,mask_token=snake_case__ ,tokenize_chinese_chars=snake_case__ ,strip_accents=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' ,snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,snake_case__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(snake_case__ ,normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : Optional[int] = strip_accents
SCREAMING_SNAKE_CASE_ : List[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ : List[Any] = normalizer_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
def snake_case ( self ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = self._tokenizer.model.save(snake_case__ ,name=snake_case__ )
return tuple(snake_case__ )
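# Hypothetical usage sketch (the class above corresponds to the public fast
# ConvBERT tokenizer):
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("hello world")  # input_ids framed by the [CLS] ... [SEP] special tokens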
| 685 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
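# The TableFormat above configures tabulate to emit pipe-separated data rows
# with no horizontal rules, a compact layout that renders cleanly inside the
# Slack code blocks built further down.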
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[List[float]] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(lowerCamelCase_ ):
if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ )
else:
return _interleave_iterable_datasets(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(lowerCamelCase_ ):
if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ )
else:
return _concatenate_iterable_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ )
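# Minimal usage sketch, assuming the two functions above are exposed as the
# public interleave_datasets / concatenate_datasets API:
#   d1 = Dataset.from_dict({"x": [0, 1]})
#   d2 = Dataset.from_dict({"x": [10, 11]})
#   interleave_datasets([d1, d2])    # alternates rows: x = [0, 10, 1, 11]
#   concatenate_datasets([d1, d2])   # appends rows:    x = [0, 1, 10, 11]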
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
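# Behavioural note (a sketch of intent): for non-negative ints this matches
# int.bit_length(), e.g. 32 -> 6 and 0 -> 0; a negative input would loop
# forever because Python's arithmetic right shift never reaches zero.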
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
UpperCamelCase__ : List[Any] = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(lowerCamelCase_ , map_location='cpu' )
return sd
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=rename_keys_prefix ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = OrderedDict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE_ : List[str] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE_ : List[Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE_ : Optional[int] = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Any = 'pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Dict = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {'visual_embedding_dim': 5_12}
SCREAMING_SNAKE_CASE_ : Tuple = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {'visual_embedding_dim': 20_48}
SCREAMING_SNAKE_CASE_ : Any = 'vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : int = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
SCREAMING_SNAKE_CASE_ : Tuple = 'vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE_ : Tuple = 'nlvr'
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE_ : Tuple = load_state_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE_ : str = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase__ : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
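# Example invocation (hypothetical script and local paths, for illustration;
# the checkpoint name is one of the ACCEPTABLE_CHECKPOINTS listed above):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa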
| 685 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> dict[str, str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
SCREAMING_SNAKE_CASE_ : Any = remove_duplicates(key.upper() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCamelCase_ )
# First fill cipher with key characters
SCREAMING_SNAKE_CASE_ : List[Any] = {alphabet[i]: char for i, char in enumerate(lowerCamelCase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase_ ) , 26 ):
SCREAMING_SNAKE_CASE_ : List[str] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
SCREAMING_SNAKE_CASE_ : List[str] = alphabet[i - offset]
SCREAMING_SNAKE_CASE_ : int = char
return cipher_alphabet
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : dict[str, str] ) -> str:
"""simple docstring"""
return "".join(cipher_map.get(lowerCamelCase_ , lowerCamelCase_ ) for ch in message.upper() )
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : dict[str, str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase_ , lowerCamelCase_ ) for ch in message.upper() )
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input('Enter message to encode or decode: ' ).strip()
SCREAMING_SNAKE_CASE_ : List[str] = input('Enter keyword: ' ).strip()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
SCREAMING_SNAKE_CASE_ : List[str] = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
SCREAMING_SNAKE_CASE_ : Dict = create_cipher_map(lowerCamelCase_ )
print(func(lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
UpperCamelCase__ : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = {}
with open(lowerCamelCase_ , 'r' ) as file:
for line_number, line in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = line.strip()
if line:
SCREAMING_SNAKE_CASE_ : List[Any] = line.split()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = line_number
SCREAMING_SNAKE_CASE_ : Optional[int] = words[0]
SCREAMING_SNAKE_CASE_ : List[str] = value
return result
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE_ : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = PARAM_MAPPING[full_name.split('.' )[-1]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ : Any = hf_pointer
for attribute in hf_param_name.split('.' ):
SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE_ : Tuple = value[0]
else:
SCREAMING_SNAKE_CASE_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : int = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : Tuple = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = value
else:
SCREAMING_SNAKE_CASE_ : List[str] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = PARAM_MAPPING[full_name.split('.' )[-1]]
SCREAMING_SNAKE_CASE_ : Tuple = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ : int = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ : str = '.'.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE_ : List[str] = key
SCREAMING_SNAKE_CASE_ : Optional[Any] = value if 'lm_head' in full_key else value[0]
UpperCamelCase__ : Optional[Any] = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : List[Any] = name.split(lowerCamelCase_ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE_ : str = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : str = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'weight'
else:
SCREAMING_SNAKE_CASE_ : Dict = None
if hf_dict is not None:
rename_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return is_used
return is_used
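# Added, hedged worked example: a fairseq key such as
# 'encoder.layers.3.self_attn.k_proj.weight' matches the MAPPING entry
# 'self_attn.k_proj' -> 'encoder.layers.*.attention.k_proj'; the '*' is filled
# with the layer index ('3') recovered from the prefix before the match, and
# since the target is not a top-level key it is prefixed with 'wav2vec2.',
# yielding the HF key 'wav2vec2.encoder.layers.3.attention.k_proj.weight'
# with weight_type 'weight'.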
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : List[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ : Any = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE_ : Tuple = load_wavaveca_layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = name.split('.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(items[0] )
SCREAMING_SNAKE_CASE_ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : int = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : int = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : str=False ) -> Tuple:
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE_ : str = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE_ : Tuple = read_txt_into_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = idalabel
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaForSequenceClassification(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
feature_extractor.save_pretrained(lowerCamelCase_ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = Dictionary.load(lowerCamelCase_ )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
SCREAMING_SNAKE_CASE_ : Optional[Any] = target_dict.pad_index
SCREAMING_SNAKE_CASE_ : List[str] = target_dict.bos_index
SCREAMING_SNAKE_CASE_ : Optional[int] = target_dict.eos_index
SCREAMING_SNAKE_CASE_ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(lowerCamelCase_ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_ ) )
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : int = 1
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = WavaVecaForCTC(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaForPreTraining(lowerCamelCase_ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.Namespace(task='audio_pretraining' )
SCREAMING_SNAKE_CASE_ : Any = fairseq.tasks.setup_task(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
UpperCamelCase__ : Dict = parser.parse_args()
UpperCamelCase__ : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
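# Added, hedged usage sketch (the script name and paths below are hypothetical):
#   python convert_wav2vec2_original_pytorch_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base-converted \
#       --not_finetuned
# For a fine-tuned CTC model, drop --not_finetuned and pass --dict_path pointing
# at the fairseq dictionary used during fine-tuning.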
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
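# Added worked example: power(-2, -3) takes the negative-exponent branch and
# returns 1 / actual_power(-2, -3). Because int(b / 2) truncates toward zero and
# (-3 % 2) == 1 in Python, the recursion still terminates:
# actual_power(-2, -3) = -2 * actual_power(-2, -1)**2 = -2 * (-2)**2 = -8,
# so the print below outputs -0.125.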
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 1 |
from collections.abc import Callable
class lowerCAmelCase_ :
def __init__( self ,snake_case__ = None ):
# Stores actual heap items.
SCREAMING_SNAKE_CASE_ : list = []
# Stores indexes of each item for supporting updates and deletion.
SCREAMING_SNAKE_CASE_ : dict = {}
# Stores current size of heap.
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        SCREAMING_SNAKE_CASE_ : int = key or (lambda snake_case__ : snake_case__)
def snake_case ( self ,snake_case__ ):
return int((i - 1) / 2 ) if i > 0 else None
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = int(2 * i + 2 )
return right if 0 < right < self.size else None
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.arr[j], self.arr[i]
def snake_case ( self ,snake_case__ ,snake_case__ ):
return self.arr[i][1] < self.arr[j][1]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = self._left(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._right(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = i
if left is not None and not self._cmp(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = left
if right is not None and not self._cmp(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = right
return valid_parent
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self._parent(snake_case__ )
while parent is not None and not self._cmp(snake_case__ ,snake_case__ ):
self._swap(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = parent, self._parent(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_valid_parent(snake_case__ )
while valid_parent != index:
self._swap(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = valid_parent, self._get_valid_parent(snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ):
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.pos_map[item]
SCREAMING_SNAKE_CASE_ : Dict = [item, self.key(snake_case__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(snake_case__ )
self._heapify_down(snake_case__ )
def snake_case ( self ,snake_case__ ):
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE_ : Tuple = self.arr[self.size - 1]
SCREAMING_SNAKE_CASE_ : Optional[int] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(snake_case__ )
self._heapify_down(snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(snake_case__ )] )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = [item, self.key(snake_case__ )]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def snake_case ( self ):
return self.arr[0] if self.size else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
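# Added, hedged sketch: the class above lost its method names to obfuscation, so
# this standalone helper illustrates the same key-based priority-queue idea with
# Python's heapq instead of the custom heap. It is an illustration, not the
# class's real API.
def _heapq_key_demo():
    import heapq

    key = lambda item: -item  # max-heap behaviour via a score function
    heap = []
    for item in (3, 1, 4, 1, 5):
        heapq.heappush(heap, (key(item), item))  # store (score, item) pairs
    # items come back in score order: 5, 4, 3, 1, 1
    return [heapq.heappop(heap)[1] for _ in range(len(heap))]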
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
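        # Added, hedged note: with rope_scaling, 'linear' scaling divides position
        # indices by the factor (so even short-input embeddings change), while
        # 'dynamic' NTK-style scaling only rescales the rotary base once the input
        # exceeds the original max_position_embeddings; that is why the short input
        # matches the unscaled model above only in the dynamic case.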
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Optional[Any] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
UpperCamelCase__ : Dict = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(re.match(R'.*layer_(\d*).*' , lowerCamelCase_ )[1] )
layer_number -= 3
return F'h.{layer_number}.' + key
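# Added, hedged worked example: in the un-obfuscated original the regex runs on
# the checkpoint file name. For a hypothetical file
# 'layer_04-model_00-model_states.pt' and the key 'input_layernorm.weight'
# (which is not in the rename map), the layer number 4 is extracted, the fixed
# offset of 3 is subtracted, and the function returns 'h.1.input_layernorm.weight'.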
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
SCREAMING_SNAKE_CASE_ : Optional[int] = re.search(R'[^\d](\d+)$' , str(lowerCamelCase_ ) )
if bit_search is None:
raise ValueError(F'`dtype` is not a valid dtype: {dtype}.' )
SCREAMING_SNAKE_CASE_ : Tuple = int(bit_search.groups()[0] )
return bit_size // 8
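# Added worked examples: torch.bool returns 1/8 via the early exit; for
# torch.float16 the regex pulls the trailing '16' out of 'torch.float16',
# giving 16 // 8 == 2 bytes per element, and torch.int64 gives 64 // 8 == 8.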
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if bloom_config_file == "":
SCREAMING_SNAKE_CASE_ : List[Any] = BloomConfig()
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = BloomConfig.from_json_file(lowerCamelCase_ )
if shard_model:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.listdir(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(filter(lambda lowerCamelCase_ : s.startswith('layer' ) and "model_00" in s , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = {'weight_map': {}, 'metadata': {}}
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = BloomConfig()
for j, file in enumerate(lowerCamelCase_ ):
print('Processing file: {}'.format(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
for i in range(lowerCamelCase_ ):
# load all TP files
SCREAMING_SNAKE_CASE_ : Dict = file.replace('model_00' , F'model_0{i}' )
SCREAMING_SNAKE_CASE_ : str = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE_ : int = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp.pop(lowerCamelCase_ )
if tensors is None:
SCREAMING_SNAKE_CASE_ : Tuple = temp
else:
for key in tensors.keys():
if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE_ : List[str] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE_ : Dict = tensors[key] / pretraining_tp
torch.save(
lowerCamelCase_ , os.path.join(
lowerCamelCase_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
SCREAMING_SNAKE_CASE_ : Tuple = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE_ : str = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase_ ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE_ : List[str] = BloomConfig()
SCREAMING_SNAKE_CASE_ : Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
SCREAMING_SNAKE_CASE_ : Optional[Any] = total_size
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(lowerCamelCase_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '\n'
f.write(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = BloomModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.listdir(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = sorted(filter(lambda lowerCamelCase_ : s.startswith('layer' ) and "model_00" in s , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
for i, file in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[str] = None
for i in range(lowerCamelCase_ ):
# load all TP files
SCREAMING_SNAKE_CASE_ : Optional[int] = file.replace('model_00' , F'model_0{i}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE_ : int = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : Optional[Any] = temp.pop(lowerCamelCase_ )
if tensors is None:
SCREAMING_SNAKE_CASE_ : Dict = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase_ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(lowerCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tensors[key] / pretraining_tp
SCREAMING_SNAKE_CASE_ : Dict = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
SCREAMING_SNAKE_CASE_ : Tuple = set(other_keys.missing_keys )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ : Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
SCREAMING_SNAKE_CASE_ : Tuple = model.to(config.torch_dtype )
torch.save(model.state_dict() , lowerCamelCase_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
UpperCamelCase__ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
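# Added, hedged usage sketch (the script name and paths below are hypothetical):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path ./bloom-megatron-ckpt \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4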
| 685 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
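# Added, hedged note: vocab.txt is expected to hold one entry per line, where an
# entry may list several comma-separated surface forms that all share the same
# id (the loop above assigns the line index to every form on the line), and
# emoji.json supplies the emoji table loaded at the top of this function.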
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
        # self.vocab supports the character variants unique to Japanese, and therefore has a large vocabulary
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<URL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.content_repattera.sub('<EMAIL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<TEL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<PRICE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
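    # Added, hedged illustration: in the un-obfuscated original each compiled
    # pattern above is a distinct attribute, so this method replaces URLs,
    # e-mail addresses, phone numbers, dates and prices with the <URL>, <EMAIL>,
    # <TEL>, <DATE> and <PRICE> placeholders, then collapses runs of box-drawing
    # characters into a single <BLOCK>; e.g. '詳細は https://example.com まで'
    # would become '詳細は <URL> まで'.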
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE_ : int = text.replace(snake_case__ ,snake_case__ )
if clean:
SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
def check_simbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
if len(snake_case__ ) == 1 and len(snake_case__ ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = min(len(snake_case__ ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda snake_case__ : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
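    # Added worked example: byte-fallback tokens are buffered and UTF-8 decoded
    # together, so the token sequence '<|byte230|>', '<|byte151|>', '<|byte165|>'
    # yields the bytes e6 97 a5, which decode to the single character '日'.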
| 685 | 1 |
UpperCamelCase__ : Optional[int] = range(2, 20 + 1)
UpperCamelCase__ : Dict = [10**k for k in range(ks[-1] + 1)]
UpperCamelCase__ : dict[int, dict[int, list[list[int]]]] = {}
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = sum(a_i[j] for j in range(lowerCamelCase_ , len(lowerCamelCase_ ) ) )
SCREAMING_SNAKE_CASE_ : Dict = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase_ ) , lowerCamelCase_ ) ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = 0, 0
SCREAMING_SNAKE_CASE_ : Tuple = n - i
SCREAMING_SNAKE_CASE_ : str = memo.get(lowerCamelCase_ )
if sub_memo is not None:
SCREAMING_SNAKE_CASE_ : Dict = sub_memo.get(lowerCamelCase_ )
if jumps is not None and len(lowerCamelCase_ ) > 0:
# find and make the largest jump without going over
SCREAMING_SNAKE_CASE_ : List[Any] = -1
for _k in range(len(lowerCamelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
SCREAMING_SNAKE_CASE_ : List[Any] = _k
break
if max_jump >= 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = jumps[max_jump]
# since the difference between jumps is cached, add c
SCREAMING_SNAKE_CASE_ : str = diff + c
for j in range(min(lowerCamelCase_ , len(lowerCamelCase_ ) ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = divmod(lowerCamelCase_ , 10 )
if new_c > 0:
add(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = {c: []}
SCREAMING_SNAKE_CASE_ : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = next_term(lowerCamelCase_ , k - 1 , i + dn , lowerCamelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = compute(lowerCamelCase_ , lowerCamelCase_ , i + dn , lowerCamelCase_ )
diff += _diff
dn += terms_jumped
SCREAMING_SNAKE_CASE_ : Dict = sub_memo[c]
# keep jumps sorted by # of terms skipped
SCREAMING_SNAKE_CASE_ : Any = 0
while j < len(lowerCamelCase_ ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(lowerCamelCase_ , (diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(lowerCamelCase_ ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = i
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = 0, 0, 0
for j in range(len(lowerCamelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = ds_c + ds_b
diff += addend
SCREAMING_SNAKE_CASE_ : Tuple = 0
for j in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = a_i[j] + addend
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = divmod(lowerCamelCase_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return diff, i - start_i
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ) -> List[Any]:
"""simple docstring"""
for j in range(lowerCamelCase_ , len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE_ : List[Any] = digits[j] + addend
if s >= 10:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = divmod(lowerCamelCase_ , 10 )
SCREAMING_SNAKE_CASE_ : Dict = addend // 10 + quotient
else:
SCREAMING_SNAKE_CASE_ : Tuple = s
SCREAMING_SNAKE_CASE_ : int = addend // 10
if addend == 0:
break
while addend > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = divmod(lowerCamelCase_ , 10 )
digits.append(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : int = 10**15 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = [1]
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while True:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = next_term(lowerCamelCase_ , 20 , i + dn , lowerCamelCase_ )
dn += terms_jumped
if dn == n - i:
break
SCREAMING_SNAKE_CASE_ : List[str] = 0
for j in range(len(lowerCamelCase_ ) ):
a_n += digits[j] * 10**j
return a_n
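# NOTE: the jump cache above only accelerates a simple recurrence. A brute-force
# reference (fine for small n; hypothetical helper, not part of the original file)
# that the fast solution can be sanity-checked against:
def _bruteforce(n):
    # a(1) = 1, a(i+1) = a(i) + digitsum(a(i)); returns a(n)
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
# First terms: 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...
assert [_bruteforce(i) for i in range(1, 7)] == [1, 2, 4, 8, 16, 23]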
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 |
import os
import sys
import tempfile
from typing import Any, Optional, Tuple, Union
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
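# NOTE: upstream this function is exposed as `notebook_launcher`. A typical
# notebook cell looks like the sketch below (illustrative; the training function
# must build its model, dataloaders and `Accelerator` inside itself so that each
# forked worker initializes CUDA on its own):
#
#     def training_function():
#         ... # create Accelerator, model, dataloaders here
#
#     notebook_launcher(training_function, args=(), num_processes=2, mixed_precision="fp16")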
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
| 685 | 1 |
from typing import Dict, Tuple
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Tuple = "rwkv"
__a : int = {"max_position_embeddings": "context_length"}
def __init__( self ,snake_case__=50277 ,snake_case__=1024 ,snake_case__=4096 ,snake_case__=32 ,snake_case__=None ,snake_case__=None ,snake_case__=1E-5 ,snake_case__=0 ,snake_case__=0 ,snake_case__=6 ,snake_case__=False ,snake_case__=True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : Any = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Tuple = rescale_every
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE_ : Any = bos_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
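# NOTE: a quick usage sketch (class name as defined above). With defaults, the
# derived sizes fall back to the hidden size, and `max_position_embeddings` is
# answered via `context_length` (assuming the standard `PretrainedConfig`
# attribute_map behavior):
# config = lowerCAmelCase_()
# assert config.attention_hidden_size == config.hidden_size      # 4096
# assert config.intermediate_size == 4 * config.hidden_size      # 16384
# assert config.max_position_embeddings == config.context_length # mapped attribute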
| 685 |
from typing import TYPE_CHECKING, Tuple
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
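# NOTE: `_LazyModule` defers the real import until an attribute is first touched.
# The same idea can be sketched with a module-level __getattr__ (PEP 562); this is
# an illustration of the mechanism, not the transformers implementation:
import importlib
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
def __getattr__(name):
    # Runs only when `name` is not found in the module namespace.
    if name in _attr_to_module:
        module = importlib.import_module('.' + _attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(F'module {__name__!r} has no attribute {name!r}')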
| 685 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
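# NOTE: each guard above follows the same pattern. A self-contained sketch of the
# mechanism (hypothetical backend and class names, not the diffusers utilities):
# probe the backend with importlib and substitute a dummy that fails loudly only
# when instantiated.
import importlib.util
def _backend_available(package):
    # find_spec checks importability without actually importing the package
    return importlib.util.find_spec(package) is not None
if not _backend_available('somesde'): # hypothetical optional backend
    class SomeScheduler: # dummy stand-in for the real scheduler class
        def __init__(self, *args, **kwargs):
            raise ImportError('SomeScheduler requires `somesde`: pip install somesde')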
| 685 |
import json
import os
import unittest
from typing import Any, List, Tuple, Union
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
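# NOTE: `setUp` above builds a toy BPE vocabulary in which merge rules are ranked
# by position. A minimal sketch of the merge loop (the algorithm in general, not
# the CLIP implementation) reproduces the expected ['lo', 'w', 'er</w>'] split of
# 'lower':
def _bpe(symbols, merges):
    ranks = {pair: i for i, pair in enumerate(merges)} # lower index = higher priority
    symbols = list(symbols)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float('inf')))
        if best not in ranks:
            break # no applicable merge left
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
assert _bpe(['l', 'o', 'w', 'e', 'r</w>'], [('l', 'o'), ('lo', 'w</w>'), ('e', 'r</w>')]) == ['lo', 'w', 'er</w>']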
| 685 | 1 |
import json
import os
import unittest
from typing import Dict, List
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = CTRLTokenizer
__a : List[str] = False
__a : List[str] = False
def snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
SCREAMING_SNAKE_CASE_ : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = 'adapt react readapt apt'
SCREAMING_SNAKE_CASE_ : Tuple = 'adapt react readapt apt'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : Optional[int] = 'adapt react readapt apt'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
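# NOTE: every command registered in main() follows the same small contract: a
# static `register_subcommand` that wires `set_defaults(func=...)`, plus a `run()`
# method that main() invokes. A hypothetical command illustrating the contract
# (not a real transformers command):
class _EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser('echo', help='print a message')
        parser.add_argument('--message', default='hello')
        parser.set_defaults(func=lambda args: _EchoCommand(args.message))
    def __init__(self, message):
        self.message = message
    def run(self):
        print(self.message)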
| 685 | 1 |
import os
import tempfile
import unittest
from typing import Any, Dict, Optional
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=128 ,snake_case__=32 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : Dict = use_input_mask
SCREAMING_SNAKE_CASE_ : str = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : int = type_vocab_size
SCREAMING_SNAKE_CASE_ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE_ : Tuple = num_choices
SCREAMING_SNAKE_CASE_ : List[Any] = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ):
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = NezhaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[Any] = NezhaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = NezhaForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = NezhaForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,next_sentence_label=snake_case__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = NezhaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,start_positions=snake_case__ ,end_positions=snake_case__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = NezhaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self.num_choices
SCREAMING_SNAKE_CASE_ : List[str] = NezhaForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Any = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__a : str = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Optional[Any] = True
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[Any] = super()._prepare_for_class(snake_case__ ,snake_case__ ,return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=snake_case__ )
return inputs_dict
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = NezhaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case__ )
def snake_case ( self ):
# This regression test was failing with PyTorch < 1.3
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : int = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE_ : Any = None
self.model_tester.create_and_check_model_as_decoder(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def snake_case ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = NezhaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = torch.jit.trace(
snake_case__ ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ ,os.path.join(snake_case__ ,'bert.pt' ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.jit.load(os.path.join(snake_case__ ,'bert.pt' ) ,map_location=snake_case__ )
loaded(inputs_dict['input_ids'].to(snake_case__ ) ,inputs_dict['attention_mask'].to(snake_case__ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ,attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE_ : str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case__ ,atol=1E-4 ) )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE_ : List[str] = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case__ ,atol=1E-4 ) )
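# NOTE: the torchscript GPU test above traces a model, saves it, reloads it and
# calls it again. The same round trip on a toy module (a sketch using standard
# torch.jit APIs, not the Nezha-specific test):
def _trace_round_trip():
    model = torch.nn.Linear(4, 2).eval()
    example = torch.randn(1, 4)
    traced = torch.jit.trace(model, example)
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'tiny.pt')
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path, map_location='cpu')
    assert torch.allclose(traced(example), loaded(example))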
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
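# NOTE: a quick demonstration of what the two patterns above capture, run on a
# literal string instead of a file (absolute imports are deliberately ignored):
# >>> sample = 'from .utils import helper\nimport .config\nimport torch\n'
# >>> rel = re.findall(r'^\s*import\s+\.(\S+)\s*$', sample, flags=re.MULTILINE)
# >>> rel += re.findall(r'^\s*from\s+\.(\S+)\s+import', sample, flags=re.MULTILINE)
# >>> sorted(set(rel))
# ['config', 'utils']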
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
| 685 | 1 |
from typing import Any, Dict, List, Optional, Union
UpperCamelCase__ : Any = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [False] * len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = [s]
SCREAMING_SNAKE_CASE_ : Dict = True
while queue:
SCREAMING_SNAKE_CASE_ : Optional[int] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = u
return visited[t]
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [-1] * (len(lowerCamelCase_ ))
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : List[str] = []
    SCREAMING_SNAKE_CASE_ : str = [i[:] for i in graph] # Keep a copy of the original capacities.
while bfs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = float('Inf' )
SCREAMING_SNAKE_CASE_ : List[str] = sink
while s != source:
            # Find the bottleneck (minimum residual capacity) along the chosen path
SCREAMING_SNAKE_CASE_ : int = min(lowerCamelCase_ , graph[parent[s]][s] )
SCREAMING_SNAKE_CASE_ : Tuple = parent[s]
max_flow += path_flow
SCREAMING_SNAKE_CASE_ : List[Any] = sink
while v != source:
SCREAMING_SNAKE_CASE_ : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
SCREAMING_SNAKE_CASE_ : int = parent[v]
for i in range(len(lowerCamelCase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
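# NOTE: `mincut` above returns the saturated edges but discards the flow value it
# accumulates. A compact standalone Edmonds-Karp that returns the max-flow value,
# as a sketch for cross-checking (23 is the known answer for this classic network):
def _max_flow(capacity, source, sink):
    from collections import deque
    residual = [row[:] for row in capacity]
    n, flow = len(capacity), 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1: # BFS for a shortest augmenting path
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return flow # no augmenting path left
        bottleneck, v = float('inf'), sink
        while v != source: # minimum residual capacity along the path
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source: # update forward and backward residual edges
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck
# _max_flow(test_graph, 0, 5) == 23 for the capacity matrix above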
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
| 685 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
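# Unit tests for LLaMA: common modeling checks, sequence-classification heads,
# RoPE scaling behaviour, and slow integration tests against reference logits.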
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
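# Monte Carlo estimators: pi via random darts at the unit circle, and definite
# integrals via the sample-mean method.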
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
import copy
import random
from transformers import CLIPTokenizer
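# CLIP tokenizer wrapper that lets a single placeholder token expand into
# several consecutive tokens; token_map records each expansion.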
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = {}
def snake_case ( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = super().add_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
' `placeholder_token` that is not already in the tokenizer.' )
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=1 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = placeholder_token + F'_{i}'
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}; keep placeholder tokens independent.' )
SCREAMING_SNAKE_CASE_ : str = output
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=1.0 ):
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE_ : List[str] = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE_ : Tuple = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE_ : Tuple = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace(snake_case__ ,' '.join(snake_case__ ) )
return text
def __call__( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
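# Tests for the LayoutLMv3 image processor: fixed-size resizing and, when
# apply_ocr is enabled, Tesseract-extracted words with normalized boxes.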
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
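# Build whole-word-masking reference files for Chinese: LTP segments each line
# into words, and BERT subwords that continue a Chinese word are marked so they
# can be masked together.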
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
if (
        (cp >= 0x4e_00 and cp <= 0x9f_ff)  # CJK Unified Ideographs
        or (cp >= 0x34_00 and cp <= 0x4d_bf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_00_00 and cp <= 0x2_a6_df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf9_00 and cp <= 0xfa_ff)  # CJK Compatibility Ideographs
        or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> List[Any]:
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE_ : int = ord(lowerCamelCase_ )
if not _is_chinese_char(lowerCamelCase_ ):
return 0
return 1
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = set()
for token in tokens:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(lowerCamelCase_ ) > 1 and is_chinese(lowerCamelCase_ )
if chinese_word:
word_set.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(lowerCamelCase_ )
return word_list
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : set() ) -> Optional[int]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE_ : str = max([len(lowerCamelCase_ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE_ : Tuple = bert_tokens
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = 0, len(lowerCamelCase_ )
while start < end:
SCREAMING_SNAKE_CASE_ : List[str] = True
if is_chinese(bert_word[start] ):
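            # Greedily match the longest segmented word starting at this position.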
SCREAMING_SNAKE_CASE_ : Any = min(end - start , lowerCamelCase_ )
for i in range(lowerCamelCase_ , 1 , -1 ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE_ : Tuple = '##' + bert_word[j]
SCREAMING_SNAKE_CASE_ : Tuple = start + i
SCREAMING_SNAKE_CASE_ : Optional[int] = False
break
if single_word:
start += 1
return bert_word
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : LTP , lowerCamelCase_ : BertTokenizer ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(0 , len(lowerCamelCase_ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : Any = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
SCREAMING_SNAKE_CASE_ : Dict = [get_chinese_word(lowerCamelCase_ ) for r in res]
ltp_res.extend(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(0 , len(lowerCamelCase_ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : str = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = []
for input_ids, chinese_word in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = []
for id in input_ids:
SCREAMING_SNAKE_CASE_ : str = bert_tokenizer._convert_id_to_token(lowerCamelCase_ )
input_tokens.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = add_sub_symbol(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = []
        # We only save the positions of Chinese subwords that start with ##, i.e. subwords that continue a whole word.
for i, token in enumerate(lowerCamelCase_ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE_ : Optional[Any] = token[2:]
                # save the position of this Chinese token
if len(lowerCamelCase_ ) == 1 and _is_chinese_char(ord(lowerCamelCase_ ) ):
ref_id.append(lowerCamelCase_ )
ref_ids.append(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
return ref_ids
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
SCREAMING_SNAKE_CASE_ : str = [line.strip() for line in data if len(lowerCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    SCREAMING_SNAKE_CASE_ : List[Any] = LTP(args.ltp )  # segmentation is faster on a GPU device
SCREAMING_SNAKE_CASE_ : str = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE_ : Dict = prepare_ref(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = [json.dumps(lowerCamelCase_ ) + '\n' for ref in ref_ids]
f.writelines(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
UpperCamelCase__ : Optional[int] = parser.parse_args()
main(args)
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
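# Torch dataset over the HANS heuristics corpus; tokenized features are cached
# on disk behind a FileLock so only one process builds them.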
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
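        # Stream the pre-computed features into a tf.data.Dataset through a generator.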
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = -1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE_ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE_ : List[str] = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE_ : Optional[int] = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE_ : List[str] = candidate
return product
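# For n = 1000 the unique triplet is (200, 375, 425), giving 31875000.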
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
| 685 | 1 |
import colorsys
from PIL import Image # type: ignore
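# Render the Mandelbrot set: get_distance returns a normalized escape step,
# which is mapped to black-and-white or HSV-coded colors per pixel.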
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = x
SCREAMING_SNAKE_CASE_ : Union[str, Any] = y
for step in range(lowerCamelCase_ ): # noqa: B007
SCREAMING_SNAKE_CASE_ : int = a * a - b * b + x
SCREAMING_SNAKE_CASE_ : List[Any] = 2 * a * b + y
SCREAMING_SNAKE_CASE_ : Dict = a_new
        # divergence happens for every complex number whose absolute value
        # exceeds 2, i.e. once a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def __UpperCAmelCase ( lowerCamelCase_ : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def __UpperCAmelCase ( lowerCamelCase_ : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(lowerCamelCase_ , 1 , 1 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int = 8_00 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = -0.6 , lowerCamelCase_ : float = 0 , lowerCamelCase_ : float = 3.2 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : bool = True , ) -> Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = Image.new('RGB' , (image_width, image_height) )
SCREAMING_SNAKE_CASE_ : Optional[int] = img.load()
# loop through the image-coordinates
for image_x in range(lowerCamelCase_ ):
for image_y in range(lowerCamelCase_ ):
# determine the figure-coordinates based on the image-coordinates
SCREAMING_SNAKE_CASE_ : Optional[int] = figure_width / image_width * image_height
SCREAMING_SNAKE_CASE_ : int = figure_center_x + (image_x / image_width - 0.5) * figure_width
SCREAMING_SNAKE_CASE_ : List[Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
SCREAMING_SNAKE_CASE_ : Optional[int] = get_distance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_color_coded_rgb(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = get_black_and_white_rgb(lowerCamelCase_ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
UpperCamelCase__ : Dict = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 685 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
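    # Sanity check with representative dopant levels (cm^-3), an assumed example:
    # N_d = N_a = 1e17 and n_i = 1e10 give V_bi = (kT / q) * ln(1e14), roughly 0.83 V at 300 K.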
| 685 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
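# Google the query given on the command line (or prompted interactively),
# scrape the first organic result link, and open it in the default browser.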
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
UpperCamelCase__ : Optional[Any] = F"""https://www.google.com/search?q={query}&num=100"""
UpperCamelCase__ : Any = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
UpperCamelCase__ : str = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
UpperCamelCase__ : Any = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 685 |
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
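# Fixed-priority queue: three FIFO lists, priority 0 served first; each
# priority level holds at most 100 items.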
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
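# The two queue classes above hand-roll what heapq gives for free: pushing
# (priority, arrival_order, value) tuples keeps dequeues FIFO within each
# priority level. A minimal sketch of the equivalent behaviour:
import heapq

heap, order = [], 0
for priority, value in [(0, 10), (1, 70), (0, 100), (2, 1)]:
    heapq.heappush(heap, (priority, order, value))
    order += 1
while heap:
    print(heapq.heappop(heap)[2])  # 10, 100, 70, 1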
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[Any]=0.9_9_9 , lowerCamelCase_ : Any="cosine" , ) -> Optional[Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase_ : Union[str, Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase_ : Any ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ : Any = []
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase_ ) / alpha_bar_fn(lowerCamelCase_ ) , lowerCamelCase_ ) )
    return torch.tensor(lowerCamelCase_ , dtype=torch.float32 )
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
__a : Any = [e.name for e in KarrasDiffusionSchedulers]
__a : Union[str, Any] = 2
@register_to_config
def __init__( self ,snake_case__ = 1000 ,snake_case__ = 0.00085 ,snake_case__ = 0.012 ,snake_case__ = "linear" ,snake_case__ = None ,snake_case__ = "epsilon" ,snake_case__ = "linspace" ,snake_case__ = 0 ,):
if trained_betas is not None:
            SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(snake_case__ ,dtype=torch.float32 )
elif beta_schedule == "linear":
            SCREAMING_SNAKE_CASE_ : Tuple = torch.linspace(snake_case__ ,snake_case__ ,snake_case__ ,dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ : Dict = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,snake_case__ ,dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ : int = betas_for_alpha_bar(snake_case__ )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ : Tuple = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ : List[str] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(snake_case__ ,snake_case__ ,snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__=None ):
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.timesteps
SCREAMING_SNAKE_CASE_ : List[str] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ : str = 1 if len(snake_case__ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ : Dict = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
SCREAMING_SNAKE_CASE_ : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case ( self ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Any = self.index_for_timestep(snake_case__ )
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sigmas[step_index]
else:
SCREAMING_SNAKE_CASE_ : Any = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE_ : List[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,):
SCREAMING_SNAKE_CASE_ : int = num_inference_steps
SCREAMING_SNAKE_CASE_ : List[str] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.linspace(0 ,num_train_timesteps - 1 ,snake_case__ ,dtype=snake_case__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ : Any = (np.arange(0 ,snake_case__ ) * step_ratio).round()[::-1].copy().astype(snake_case__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ : int = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ : List[str] = (np.arange(snake_case__ ,0 ,-step_ratio )).round().copy().astype(snake_case__ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.from_numpy(np.log(snake_case__ ) ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = np.interp(snake_case__ ,np.arange(0 ,len(snake_case__ ) ) ,snake_case__ )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(snake_case__ ).to(device=snake_case__ )
# interpolate sigmas
SCREAMING_SNAKE_CASE_ : Any = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ : str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case__ ).startswith('mps' ):
# mps does not support float64
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(snake_case__ ).to(snake_case__ ,dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE_ : Any = torch.from_numpy(snake_case__ ).to(snake_case__ )
# interpolate timesteps
SCREAMING_SNAKE_CASE_ : str = self.sigma_to_t(snake_case__ ).to(snake_case__ ,dtype=timesteps.dtype )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
SCREAMING_SNAKE_CASE_ : str = torch.cat([timesteps[:1], interleaved_timesteps] )
SCREAMING_SNAKE_CASE_ : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ : List[Any] = defaultdict(snake_case__ )
def snake_case ( self ,snake_case__ ):
# get log sigma
SCREAMING_SNAKE_CASE_ : Any = sigma.log()
# get distribution
SCREAMING_SNAKE_CASE_ : Tuple = log_sigma - self.log_sigmas[:, None]
# get sigmas range
SCREAMING_SNAKE_CASE_ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ : Dict = low_idx + 1
SCREAMING_SNAKE_CASE_ : List[str] = self.log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ : int = self.log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ : Any = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ : Any = w.clamp(0 ,1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ : Any = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ : Union[str, Any] = t.view(sigma.shape )
return t
@property
def snake_case ( self ):
return self.sample is None
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.index_for_timestep(snake_case__ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ : str = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas_interpol[step_index + 1]
SCREAMING_SNAKE_CASE_ : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
SCREAMING_SNAKE_CASE_ : Any = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE_ : str = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ : int = sigma_hat if self.state_in_first_order else sigma_interpol
SCREAMING_SNAKE_CASE_ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ : Dict = sigma_interpol - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
SCREAMING_SNAKE_CASE_ : Tuple = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
SCREAMING_SNAKE_CASE_ : Any = sigma_next - sigma_hat
SCREAMING_SNAKE_CASE_ : Dict = self.sample
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
SCREAMING_SNAKE_CASE_ : Dict = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case__ ):
# mps does not support float64
            SCREAMING_SNAKE_CASE_ : Dict = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = timesteps.to(original_samples.device ,dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE_ : Dict = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ : Optional[int] = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ : List[str] = [self.index_for_timestep(snake_case__ ,snake_case__ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ : List[Any] = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 685 |
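# The scheduler above matches diffusers' KDPM2DiscreteScheduler (the class
# and argument names are mangled in this dump). Assuming that correspondence,
# a minimal denoising loop looks like this, with zeros standing in for a
# real UNet prediction:
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # a pipeline would call the UNet here
    sample = scheduler.step(noise_pred, t, sample).prev_sample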
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 1 |
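# The brute-force sum above can be cross-checked with inclusion-exclusion:
# add the arithmetic series of multiples of 3 and of 5 below n, then subtract
# the multiples of 15 that were counted twice.
def multiples_sum(k: int, n: int) -> int:
    m = (n - 1) // k  # how many multiples of k lie strictly below n
    return k * m * (m + 1) // 2

n = 1000
closed_form = multiples_sum(3, n) + multiples_sum(5, n) - multiples_sum(15, n)
assert closed_form == sum(e for e in range(n) if e % 3 == 0 or e % 5 == 0) == 233168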
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__ : Union[str, Any] = re.compile(r'''\s+''')
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> Optional[int]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(lowerCamelCase_ , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
    SCREAMING_SNAKE_CASE_ : Tuple = [len(line) for line in example['content'].splitlines()]
return {"line_mean": np.mean(lowerCamelCase_ ), "line_max": max(lowerCamelCase_ )}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=5 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['auto-generated', 'autogenerated', 'automatically generated']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = example['content'].splitlines()
for _, line in zip(range(lowerCamelCase_ ) , lowerCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Any=5 , lowerCamelCase_ : List[str]=0.0_5 ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ['unit tests', 'test file', 'configuration file']
SCREAMING_SNAKE_CASE_ : List[Any] = example['content'].splitlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : int = 0
# first test
for _, line in zip(range(lowerCamelCase_ ) , lowerCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
SCREAMING_SNAKE_CASE_ : int = example['content'].count('\n' )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ['def ', 'class ', 'for ', 'while ']
SCREAMING_SNAKE_CASE_ : Any = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=4 ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = example['content'].splitlines()
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(example['content'] , truncation=lowerCamelCase_ )['input_ids']
SCREAMING_SNAKE_CASE_ : Optional[int] = len(example['content'] ) / len(lowerCamelCase_ )
return {"ratio": ratio}
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {}
results.update(get_hash(lowerCamelCase_ ) )
results.update(line_stats(lowerCamelCase_ ) )
results.update(alpha_stats(lowerCamelCase_ ) )
results.update(char_token_ratio(lowerCamelCase_ ) )
results.update(is_autogenerated(lowerCamelCase_ ) )
results.update(is_config_or_test(lowerCamelCase_ ) )
results.update(has_no_keywords(lowerCamelCase_ ) )
results.update(has_few_assignments(lowerCamelCase_ ) )
return results
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ) -> Any:
"""simple docstring"""
if not check_uniques(lowerCamelCase_ , lowerCamelCase_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'rb' ) as f_in:
with gzip.open(str(lowerCamelCase_ ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
os.unlink(lowerCamelCase_ )
# Settings
UpperCamelCase__ : Any = HfArgumentParser(PreprocessingArguments)
UpperCamelCase__ : int = parser.parse_args()
if args.num_workers is None:
UpperCamelCase__ : int = multiprocessing.cpu_count()
UpperCamelCase__ : int = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCamelCase__ : List[Any] = time.time()
UpperCamelCase__ : str = load_dataset(args.dataset_name, split='''train''')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCamelCase__ : List[Any] = time.time()
UpperCamelCase__ : Dict = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCamelCase__ : str = set(ds.unique('''hash'''))
UpperCamelCase__ : Union[str, Any] = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCamelCase__ : int = time.time()
UpperCamelCase__ : List[str] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCamelCase__ : Optional[Any] = time.time()
UpperCamelCase__ , UpperCamelCase__ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCamelCase__ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
UpperCamelCase__ : Optional[Any] = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
UpperCamelCase__ : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCamelCase__ : str = str(data_dir / F"""file-{file_number+1:012}.json""")
UpperCamelCase__ : Union[str, Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 685 |
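# The exact-dedup step above keys every file on the md5 of its
# whitespace-stripped content, so two files that differ only in formatting
# collapse to the same hash. The idea in isolation:
import hashlib
import re

def content_hash(text: str) -> str:
    return hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()

assert content_hash("def f():\n    return 1") == content_hash("def f(): return 1")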
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 1 |
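# The repeated classes above are import-time placeholders: constructing one
# (or calling from_config/from_pretrained) raises a message naming the
# missing backend instead of failing later with an obscure error. A generic
# sketch of the pattern, with made-up names:
class _RequiresFlax:
    _message = "This object requires the flax library: `pip install flax`."

    def __init__(self, *args, **kwargs):
        raise ImportError(self._message)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        raise ImportError(cls._message)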
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase_ ( unittest.TestCase , lowerCamelCase_ ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = load_tool('text-classification' )
self.tool.setup()
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_tool('text-classification' ,remote=snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.tool('That\'s quite cool' ,['positive', 'negative'] )
self.assertEqual(snake_case__ ,'positive' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.remote_tool('That\'s quite cool' ,['positive', 'negative'] )
self.assertEqual(snake_case__ ,'positive' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.tool(text='That\'s quite cool' ,labels=['positive', 'negative'] )
self.assertEqual(snake_case__ ,'positive' )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.remote_tool(text='That\'s quite cool' ,labels=['positive', 'negative'] )
self.assertEqual(snake_case__ ,'positive' )
| 685 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 1 |
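# The report above builds Slack-friendly pipe tables through a hand-rolled
# TableFormat; tabulate's stock "github" format produces nearly the same
# layout. Illustrative rows only:
from tabulate import tabulate

rows = [["tests/test_foo.py", 2], ["tests/test_bar.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt="github"))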
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
with open(lowerCamelCase_ ) as metadata_file:
SCREAMING_SNAKE_CASE_ : str = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = LukeConfig(use_entity_aware_attention=lowerCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE_ : int = load_original_entity_vocab(lowerCamelCase_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE_ : str = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken('<ent>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = AddedToken('<ent2>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = 'MLukeTokenizer'
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE_ : str = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE_ : str = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE_ : str = state_dict[bias_name]
SCREAMING_SNAKE_CASE_ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : int = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE_ : Any = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ : Any = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE_ : Optional[Any] = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE_ : str = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : str = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE_ : Dict = LukeForMaskedLM(config=lowerCamelCase_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE_ : Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict[key]
else:
SCREAMING_SNAKE_CASE_ : str = state_dict[key]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if set(lowerCamelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowerCamelCase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE_ : Tuple = MLukeTokenizer.from_pretrained(lowerCamelCase_ , task='entity_classification' )
SCREAMING_SNAKE_CASE_ : Optional[int] = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE_ : Optional[Any] = (0, 9)
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE_ : int = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE_ : Dict = (24, 30)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Dict = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE_ : Any = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE_ : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE_ : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ['[MASK]', '[PAD]', '[UNK]']
    SCREAMING_SNAKE_CASE_ : Optional[int] = [json.loads(line) for line in open(lowerCamelCase_ )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
for entry in data:
SCREAMING_SNAKE_CASE_ : int = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE_ : List[Any] = entity_id
break
SCREAMING_SNAKE_CASE_ : Optional[int] = F'{language}:{entity_name}'
SCREAMING_SNAKE_CASE_ : str = entity_id
return new_mapping
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 |
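# The conversion above widens the vocabulary by editing tensors in the raw
# state dict. When a model object is already instantiated, the usual
# transformers idiom is add_special_tokens plus resize_token_embeddings;
# a hedged sketch using a public checkpoint:
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForMaskedLM.from_pretrained("xlm-roberta-base")
num_added = tokenizer.add_special_tokens({"additional_special_tokens": ["<ent>", "<ent2>"]})
model.resize_token_embeddings(len(tokenizer))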
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
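# For positive integers the shift-and-count loop above is equivalent to the
# built-in int.bit_length(), i.e. the 1-based index of the most significant
# set bit:
for number in (1, 2, 5, 97, 1024):
    assert number.bit_length() == len(bin(number)) - 2
print((97).bit_length())  # 7, since 97 == 0b1100001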
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
| 685 |
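# The public entry point for the launcher above is accelerate's
# notebook_launcher; from a notebook cell, a minimal multi-process run is:
from accelerate import notebook_launcher

def training_function():
    print("hello from one process")

notebook_launcher(training_function, args=(), num_processes=2)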
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    SCREAMING_SNAKE_CASE_ : Dict = lowerCamelCase_  # one classical bit per qubit
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 1 |
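# An n-qubit GHZ circuit like the one above puts all amplitude on the
# all-zeros and all-ones basis states, so the measurement counts only ever
# contain those two bitstrings. A statevector check for the 2-qubit case:
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector

bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)
print(Statevector.from_instruction(bell).probabilities_dict())  # {'00': 0.5, '11': 0.5}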
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCamelCase__ : List[Any] = 20_48
UpperCamelCase__ : Optional[Any] = 40_96
UpperCamelCase__ : Dict = 42
UpperCamelCase__ : Any = os.environ.pop('''PROCESS_TRAIN''', '''false''')
UpperCamelCase__ : str = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
def choose_first(lowerCamelCase_ : Any , lowerCamelCase_ : Any=False ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) == 1:
SCREAMING_SNAKE_CASE_ : int = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
SCREAMING_SNAKE_CASE_ : int = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'id': example['id']}
SCREAMING_SNAKE_CASE_ : int = example['annotations']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
SCREAMING_SNAKE_CASE_ : int = ['yes'] if 1 in yes_no_answer else ['no']
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : str = ['<cls>']
else:
SCREAMING_SNAKE_CASE_ : str = ['short']
SCREAMING_SNAKE_CASE_ : List[str] = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
SCREAMING_SNAKE_CASE_ : str = ['long']
SCREAMING_SNAKE_CASE_ : List[str] = choose_first(annotation['long_answer'] , is_long_answer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = []
answer.update(lowerCamelCase_ )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , lowerCamelCase_ ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _get_single_answer(lowerCamelCase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE_ : Optional[Any] = example['document']['tokens']
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = example['document']['tokens']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = answer['start_token']
SCREAMING_SNAKE_CASE_ : str = answer['end_token']
SCREAMING_SNAKE_CASE_ : Any = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
SCREAMING_SNAKE_CASE_ : Tuple = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
SCREAMING_SNAKE_CASE_ : Tuple = doc['is_html'][answer['start_token'] : answer['end_token']]
SCREAMING_SNAKE_CASE_ : str = doc['token'][answer['start_token'] : answer['end_token']]
SCREAMING_SNAKE_CASE_ : Optional[int] = ' '.join([old[i] for i in range(len(lowerCamelCase_ ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , lowerCamelCase_ , end='\n' )
print('Old:' , lowerCamelCase_ , end='\n\n' )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=20_48 , lowerCamelCase_ : Dict=40_96 , lowerCamelCase_ : int=True ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_context_and_ans(lowerCamelCase_ , assertion=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(example['question']['text'] , out['context'] ).input_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : str = input_ids[:q_len]
SCREAMING_SNAKE_CASE_ : Optional[int] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
for i in doc_start_indices:
SCREAMING_SNAKE_CASE_ : Optional[Any] = i + max_length - q_len
SCREAMING_SNAKE_CASE_ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(lowerCamelCase_ ),
"end_token": [-1_00] * len(lowerCamelCase_ ),
"category": category,
},
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = out['context'].split()
SCREAMING_SNAKE_CASE_ : List[Any] = splitted_context[answer['end_token']]
SCREAMING_SNAKE_CASE_ : List[Any] = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=lowerCamelCase_ , ).input_ids )
SCREAMING_SNAKE_CASE_ : Any = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=lowerCamelCase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
SCREAMING_SNAKE_CASE_ : int = len(tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
SCREAMING_SNAKE_CASE_ : Any = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
SCREAMING_SNAKE_CASE_ : int = answer['start_token']
SCREAMING_SNAKE_CASE_ : Optional[int] = answer['end_token']
if assertion:
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(lowerCamelCase_ )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , lowerCamelCase_ , end='\n\n' )
if len(lowerCamelCase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
SCREAMING_SNAKE_CASE_ : Tuple = input_ids[:q_len]
SCREAMING_SNAKE_CASE_ : List[Any] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
SCREAMING_SNAKE_CASE_ : int = i + max_length - q_len
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = start_token - i + q_len
SCREAMING_SNAKE_CASE_ : List[Any] = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = -1_00
SCREAMING_SNAKE_CASE_ : Union[str, Any] = -1_00
answers_category.append('null' )
SCREAMING_SNAKE_CASE_ : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_ )
answers_end_token.append(lowerCamelCase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(lowerCamelCase_ ) )
print('Old:' , tokenizer.decode(lowerCamelCase_ ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any]=20_48 , lowerCamelCase_ : Optional[Any]=40_96 , lowerCamelCase_ : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = get_strided_contexts_and_ans(
lowerCamelCase_ , lowerCamelCase_ , doc_stride=lowerCamelCase_ , max_length=lowerCamelCase_ , assertion=lowerCamelCase_ , )
return example
def save_to_disk ( hf_data , file_name ) -> None:
    """Appends the processed samples to a JSON Lines file, skipping unanswerable samples and most null-category samples."""
    with jsonlines.open(file_name , 'a' ) as writer:
        for example in tqdm(hf_data , total=len(hf_data ) , desc='Saving samples ... ' ):
            labels = example['labels']
            for ids, start, end, cat in zip(
                example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue # randomly drop ~60% of the null-category samples
                writer.write(
                    {
                        'input_ids': ids,
                        'start_token': start,
                        'end_token': end,
                        'category': CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCamelCase__ : Optional[int] = load_dataset('''natural_questions''')
UpperCamelCase__ : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCamelCase__ : int = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
UpperCamelCase__ : Any = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
UpperCamelCase__ : Any = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCamelCase__ : List[str] = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
UpperCamelCase__ : int = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
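# Illustration (added, not part of the original script): reading the saved JSON Lines
# file back. A minimal sketch that assumes only the field names written by
# `save_to_disk` above and the `jsonlines` import already present in this script;
# the function name is hypothetical.
def load_samples_from_disk(file_name):
    """Yields (input_ids, start_token, end_token, category) tuples from a saved jsonl file."""
    with jsonlines.open(file_name) as reader:
        for sample in reader:
            yield sample["input_ids"], sample["start_token"], sample["end_token"], sample["category"]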
| 685 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
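# Worked example (added for illustration): 538 is bouncy because its digits first
# decrease (5 -> 3) and then increase (3 -> 8), so they are neither fully ascending
# nor fully descending; 1349 (non-decreasing digits) and 9620 (non-increasing digits)
# are not bouncy. solution(99) returns the first n for which at least 99% of the
# integers 1..n are bouncy.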
| 685 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[str] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
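# Note (added for illustration): with the `_LazyModule` pattern above, heavy submodules
# are only imported on first attribute access, so e.g.
#
#   from transformers.models.chinese_clip import ChineseCLIPProcessor
#
# triggers the import of `processing_chinese_clip` at that point, while merely importing
# the package does not pull in torch- or vision-dependent code.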
| 685 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : List[Any] = DiTPipeline
__a : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__a : Dict = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__a : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__a : int = False
def snake_case ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TransformeraDModel(
sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=snake_case__ ,activation_fn='gelu-approximate' ,num_embeds_ada_norm=1000 ,norm_type='ada_norm_zero' ,norm_elementwise_affine=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : Optional[int] = DDIMScheduler()
SCREAMING_SNAKE_CASE_ : str = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def snake_case ( self ,snake_case__ ,snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = 'cpu'
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
SCREAMING_SNAKE_CASE_ : Any = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
SCREAMING_SNAKE_CASE_ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ ,1E-3 )
def snake_case ( self ):
self._test_inference_batch_single_identical(relax_max_difference=snake_case__ ,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.get_label_ids(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(snake_case__ ,generator=snake_case__ ,num_inference_steps=40 ,output_type='np' ).images
for word, image in zip(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Any = ['vase', 'umbrella']
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe.get_label_ids(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(snake_case__ ,generator=snake_case__ ,num_inference_steps=25 ,output_type='np' ).images
for word, image in zip(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 685 |
def actual_power ( a : int , b : int ) -> int:
    """Computes a**b for b >= 0 by exponentiation by squaring."""
    if b == 0:
        return 1
    half = actual_power(a , int(b / 2 ) )
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half
def power ( a : int , b : int ) -> float:
    """Computes a**b for any integer exponent b, including negative exponents."""
    if b < 0:
        return 1 / actual_power(a , -b )
    return actual_power(a , b )
if __name__ == "__main__":
    print(power(-2, -3))
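    # Quick checks (added for illustration): exponentiation by squaring halves the
    # exponent on each call, so actual_power needs O(log b) multiplications instead
    # of the O(b) of naive repeated multiplication. power(-2, -3) evaluates as
    # 1 / actual_power(-2, 3) = 1 / -8 = -0.125.
    assert actual_power(2, 10) == 1024
    assert power(2, -1) == 0.5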
| 685 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
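# Illustration (added, not part of the original tests): what the offset-mapping check
# above verifies, shown with the repeated-token input used in the test. For
#   tokenizer_r("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# the expected offset_mapping is [(0, 5), (6, 11)], i.e. the (start, end) character
# spans of each token in the raw string, with the separating space excluded from both.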
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
    def snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
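# Note (added for illustration): the integration tests above are gated behind @slow
# and @require_torch_gpu. With the usual transformers test layout (an assumption; the
# path may differ in this checkout) they can be run with e.g.:
#
#   RUN_SLOW=1 python -m pytest tests/models/llama/test_modeling_llama.py -k "13b" -v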
| 685 | 1 |
from scipy.stats import pearsonr
import datasets
UpperCamelCase__ : Any = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
UpperCamelCase__ : int = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
UpperCamelCase__ : List[str] = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) ,reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=False ):
if return_pvalue:
SCREAMING_SNAKE_CASE_ : int = pearsonr(snake_case__ ,snake_case__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(snake_case__ ,snake_case__ )[0] )}
| 685 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji ( vocab_file : str , emoji_file : str ) -> Tuple:
    """Loads the vocabulary file and the emoji file into dictionaries."""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,emoji_file ,unk_token="<|endoftext|>" ,pad_token="<|endoftext|>" ,bos_token="<|startoftext|>" ,eos_token="<|endoftext|>" ,do_clean_text=False ,**kwargs ,):
        super().__init__(
            unk_token=unk_token ,pad_token=pad_token ,bos_token=bos_token ,eos_token=eos_token ,do_clean_text=do_clean_text ,**kwargs ,)
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file ,emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
        return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\u3000' ,'<SP>' )  # full-width (ideographic) space
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates ,key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
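# Usage sketch (added for illustration, not part of the original module). Loading the
# published checkpoint fetches the vocab/emoji files listed in PRETRAINED_VOCAB_FILES_MAP
# above; the sample sentence is arbitrary. Left commented out to avoid a download here.
#
# from transformers import GPTNeoXJapaneseTokenizer
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# ids = tokenizer("こんにちは、世界。")["input_ids"]
# print(tokenizer.decode(ids))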
| 685 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Dict = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
'do_convert_rgb': True,
}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,snake_case__ )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(snake_case__ ,snake_case__ )
def snake_case ( self ,**snake_case__ ):
return BertTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
        SCREAMING_SNAKE_CASE_ : List[str] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [Image.fromarray(np.moveaxis(snake_case__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,snake_case__ )
self.assertIsInstance(processor_fast.tokenizer ,snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,snake_case__ )
self.assertIsInstance(processor_fast.image_processor ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer(cls_token='(CLS)' ,sep_token='(SEP)' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname ,cls_token='(CLS)' ,sep_token='(SEP)' ,do_normalize=snake_case__ )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(snake_case__ ,return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Tuple = processor(images=snake_case__ ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : str = processor(text=snake_case__ ,images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = processor(text=snake_case__ ,images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 685 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initialize an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initialize an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
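            # (Concretely, `patch_environment` below exports WORLD_SIZE, MASTER_ADDR and
            # MASTER_PORT, which torch.distributed's env:// rendezvous reads; RANK and
            # LOCAL_RANK are assigned per process by the launcher.)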
with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
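# Minimal usage sketch for the notebook launcher above (hypothetical training function):
#
#     def training_function():
#         ...  # build the model/optimizer and train; create the Accelerator *inside* here
#
#     notebook_launcher(training_function, num_processes=2)
#
# The launcher forks worker processes itself, which is why CUDA must not already be
# initialized in the notebook when targeting multiple GPUs.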
| 685 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase__ : List[str] = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
UpperCamelCase__ : int = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
UpperCamelCase__ : Union[str, Any] = '''|'''.join(sys.argv[1:])
UpperCamelCase__ : str = re.compile(rF"""^({joined_dirs}).*?\.py$""")
UpperCamelCase__ : Dict = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 685 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : str = "levit"
def __init__( self ,snake_case__=224 ,snake_case__=3 ,snake_case__=3 ,snake_case__=2 ,snake_case__=1 ,snake_case__=16 ,snake_case__=[128, 256, 384] ,snake_case__=[4, 8, 12] ,snake_case__=[4, 4, 4] ,snake_case__=[16, 16, 16] ,snake_case__=0 ,snake_case__=[2, 2, 2] ,snake_case__=[2, 2, 2] ,snake_case__=0.02 ,**snake_case__ ,):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = kernel_size
SCREAMING_SNAKE_CASE_ : Optional[int] = stride
SCREAMING_SNAKE_CASE_ : Dict = padding
SCREAMING_SNAKE_CASE_ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = depths
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key_dim
SCREAMING_SNAKE_CASE_ : int = drop_path_rate
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Dict = attention_ratio
SCREAMING_SNAKE_CASE_ : int = mlp_ratio
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : int = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
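        # (Assumed field layout, following LeViT's downsampling blocks: op name, key_dim,
        # num_heads = hidden_size // key_dim, attn_ratio, mlp_ratio, stride; each "Subsample"
        # stage halves the spatial resolution between the three hidden sizes.)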
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Any = version.parse("1.11" )
@property
def snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
return 1E-4
| 685 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
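    # Sanity walk-through of the merges above: "lower" splits to characters plus the
    # end-of-word marker, then the ranked merges "l o" -> "lo" and "e r</w>" -> "er</w>"
    # apply, giving ["lo", "w", "er</w>"]; "newer" only matches "e r</w>", hence the
    # ["n", "e", "w", "er</w>"] ids asserted above (with "<unk>" appended as id 20).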
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 685 | 1 |
UpperCamelCase__ : Any = 8.31_44_62 # Unit - J mol-1 K-1
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
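# Worked example (ideal gas law, pV = nRT): 1 mol at 273.15 K and 101325 Pa occupies
# 1 * 273.15 * 8.314462 / 101325 ≈ 0.0224 m^3, i.e. the familiar ~22.4 L molar volume.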
if __name__ == "__main__":
from doctest import testmod
testmod()
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
| 685 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Any = seq_length
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Tuple = use_labels
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Any = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE_ : List[str] = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,start_positions=snake_case__ ,end_positions=snake_case__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : str = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = self.num_choices
SCREAMING_SNAKE_CASE_ : str = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Any = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__a : int = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : str = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE_ : str = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : List[Any] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def snake_case ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,snake_case__ ,atol=1E-4 ) )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE_ : Dict = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE_ : int = tokenizer(snake_case__ ,return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE_ : int = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) ,'capital' )
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
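# Example (hypothetical files): if pipeline.py contains `from .utils import x` and utils.py
# in turn contains `from .helpers import y`, the fixed-point loop above returns both
# utils.py and helpers.py, stopping once a pass discovers no new relative imports.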
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
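# Usage sketch for the loader above (hypothetical repo id and file name; the public
# diffusers entry point for this helper is `get_class_from_dynamic_module`):
#
#     pipeline_class = get_class_from_dynamic_module("some-user/custom-pipeline", "pipeline.py")
#
# The module is resolved from the Hub (or from the GitHub community examples for bare
# pipeline names), cached under HF_MODULES_CACHE, and its DiffusionPipeline subclass is
# imported and returned.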
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
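# Trace of the divide-and-conquer recursion above: power(-2, -3) = 1 / actual_power(-2, 3)
# = 1 / (-2 * actual_power(-2, 1) * actual_power(-2, 1)) = 1 / (-8) = -0.125.
# Note the half power is recomputed on both sides of the product, so this sketch costs
# O(b) multiplications; caching the half result once would restore the usual O(log b) bound.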
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
| 685 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = 13
SCREAMING_SNAKE_CASE_ : Any = 7
SCREAMING_SNAKE_CASE_ : Tuple = 30
SCREAMING_SNAKE_CASE_ : Optional[int] = self.seq_length + self.mem_len
SCREAMING_SNAKE_CASE_ : Any = 15
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = 99
SCREAMING_SNAKE_CASE_ : int = [10, 50, 80]
SCREAMING_SNAKE_CASE_ : List[Any] = 32
SCREAMING_SNAKE_CASE_ : Optional[Any] = 32
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : List[str] = 8
SCREAMING_SNAKE_CASE_ : Tuple = 128
SCREAMING_SNAKE_CASE_ : str = 2
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vocab_size - 1
SCREAMING_SNAKE_CASE_ : List[Any] = 0.01
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = TFTransfoXLModel(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : str = {'input_ids': input_ids_a, 'mems': mems_a}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = TFTransfoXLLMHeadModel(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids_a, 'labels': lm_labels}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = model([input_ids_a, mems_a] ).to_tuple()
SCREAMING_SNAKE_CASE_ : Any = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLForSequenceClassification(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Any = config_and_inputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : int = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__a : List[str] = () if is_tf_available() else ()
__a : Optional[Any] = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__a : int = False
__a : Optional[Any] = False
__a : List[str] = False
__a : str = False
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self ,config_class=snake_case__ ,d_embed=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*snake_case__ )
def snake_case ( self ):
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(snake_case__ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
SCREAMING_SNAKE_CASE_ : Any = model.get_output_embeddings()
assert isinstance(snake_case__ ,tf.keras.layers.Layer )
SCREAMING_SNAKE_CASE_ : Tuple = model.get_bias()
assert name is None
else:
SCREAMING_SNAKE_CASE_ : str = model.get_output_embeddings()
assert x is None
SCREAMING_SNAKE_CASE_ : List[str] = model.get_bias()
assert name is None
def snake_case ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def snake_case ( self ):
pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
SCREAMING_SNAKE_CASE_ : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
SCREAMING_SNAKE_CASE_ : str = model.generate(snake_case__ ,max_length=200 ,do_sample=snake_case__ )
self.assertListEqual(output_ids[0].numpy().tolist() ,snake_case__ )
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
    # The ratio of the area of the circle to that of the square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
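    # Illustrative usage (added; the helper names pi_estimator, area_under_line_estimator_check
    # and pi_estimator_using_area_under_curve are assumed, since this dump obfuscates
    # definition names). Monte Carlo error shrinks as O(1/sqrt(n)), so 10_000 samples
    # give roughly two correct digits of pi:
    #
    #   pi_estimator(10_000)
    #   area_under_line_estimator_check(10_000, min_value=0.0, max_value=1.0)
    #   pi_estimator_using_area_under_curve(10_000)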
| 685 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ : List[str] = '''src/diffusers'''
UpperCamelCase__ : List[Any] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ : Optional[Any] = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ : Tuple = spec.loader.load_module()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return line.startswith(lowerCamelCase_ ) or len(lowerCamelCase_ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , lowerCamelCase_ ) is not None
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = object_name.split('.' )
SCREAMING_SNAKE_CASE_ : Any = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE_ : Optional[Any] = parts[i]
while i < len(lowerCamelCase_ ) and not os.path.isfile(os.path.join(lowerCamelCase_ , F'{module}.py' ) ):
i += 1
if i < len(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = os.path.join(lowerCamelCase_ , parts[i] )
if i >= len(lowerCamelCase_ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCamelCase_ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE_ : int = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase_ ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
        indent += "    "
line_index += 1
if line_index >= len(lowerCamelCase_ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE_ : str = line_index
while line_index < len(lowerCamelCase_ ) and _should_continue(lines[line_index] , lowerCamelCase_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lines[start_index:line_index]
return "".join(lowerCamelCase_ )
UpperCamelCase__ : Any = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
UpperCamelCase__ : Optional[int] = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
UpperCamelCase__ : Any = re.compile(r'''<FILL\s+[^>]*>''')
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = code.split('\n' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
while idx < len(lowerCamelCase_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCamelCase_ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(get_indent(lowerCamelCase_ ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE_ : Tuple = F'class Bla:\n{code}'
SCREAMING_SNAKE_CASE_ : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = black.format_str(lowerCamelCase_ , mode=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = style_docstrings_in_code(lowerCamelCase_ )
return result[len('class Bla:\n' ) :] if has_indent else result
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any]=False ) -> Dict:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : int = f.readlines()
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : Tuple = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = search.groups()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = find_code_in_diffusers(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = get_indent(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE_ : Optional[int] = theoretical_indent
SCREAMING_SNAKE_CASE_ : List[Any] = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
SCREAMING_SNAKE_CASE_ : List[str] = True
while line_index < len(lowerCamelCase_ ) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase_ ):
break
SCREAMING_SNAKE_CASE_ : List[str] = lines[line_index]
SCREAMING_SNAKE_CASE_ : List[Any] = _should_continue(lowerCamelCase_ , lowerCamelCase_ ) and re.search(F'^{indent}# End copy' , lowerCamelCase_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE_ : str = lines[start_index:line_index]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''.join(lowerCamelCase_ )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE_ : Tuple = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowerCamelCase_ ) is None]
SCREAMING_SNAKE_CASE_ : str = '\n'.join(lowerCamelCase_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = replace_pattern.replace('with' , '' ).split(',' )
SCREAMING_SNAKE_CASE_ : str = [_re_replace_pattern.search(lowerCamelCase_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = pattern.groups()
SCREAMING_SNAKE_CASE_ : Tuple = re.sub(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(obja.lower() , obja.lower() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(obja.upper() , obja.upper() , lowerCamelCase_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE_ : Any = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE_ : Any = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE_ : Dict = start_index + 1
if overwrite and len(lowerCamelCase_ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase_ )
return diffs
def __UpperCAmelCase ( lowerCamelCase_ : bool = False ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = glob.glob(os.path.join(lowerCamelCase_ , '**/*.py' ) , recursive=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = []
for filename in all_files:
SCREAMING_SNAKE_CASE_ : str = is_copy_consistent(lowerCamelCase_ , lowerCamelCase_ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE_ : List[Any] = '\n'.join(lowerCamelCase_ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCamelCase__ : List[str] = parser.parse_args()
check_copies(args.fix_and_overwrite)
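    # Illustrative sketch (added; the dotted path is an example, not taken from this file):
    # the marker format the regexes above are built to detect in diffusers sources is
    #
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
    #
    # `find_code_in_diffusers` resolves the dotted path to the original definition, the
    # optional `with A->B` suffix drives the textual substitution, and an `all-casing`
    # option additionally swaps the lower- and upper-cased variants of the two names.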
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
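        # Usage note (added): with apply_ocr=True the processor runs Tesseract and returns
        # `words` plus `boxes` normalized to a 0-1000 coordinate grid alongside the pixel
        # values; with apply_ocr=False, as above, only `pixel_values` are produced.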
| 685 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase__ : Any = '''true'''
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=82 , lowerCamelCase_ : List[Any]=16 ) -> Optional[Any]:
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = RegressionModel()
SCREAMING_SNAKE_CASE_ : str = deepcopy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
return model, ddp_model, dataloader
def __UpperCAmelCase ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : Any=False ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
SCREAMING_SNAKE_CASE_ : Tuple = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(lowerCamelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : Dict = dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase_ : int ):
if use_longest:
return tokenizer.pad(lowerCamelCase_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(lowerCamelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return DataLoader(lowerCamelCase_ , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=16 )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator(dispatch_batches=lowerCamelCase_ , split_batches=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = get_dataloader(lowerCamelCase_ , not dispatch_batches )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for batch in dataloader:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase_ )
targs.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = torch.cat(lowerCamelCase_ ), torch.cat(lowerCamelCase_ )
return logits, targs
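# Note (added for clarity): `gather_for_metrics` differs from a plain `gather` in that
# it drops the samples that were duplicated to pad the final uneven batch across
# processes, so the concatenated logits/targets match the true dataset length.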
def __UpperCAmelCase ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : List[str]=82 , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : int=16 ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = get_basic_setup(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = generate_predictions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
assert (
len(lowerCamelCase_ ) == num_samples
), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase_ )}'
def __UpperCAmelCase ( lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = evaluate.load('glue' , 'mrpc' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = get_mrpc_setup(lowerCamelCase_ , lowerCamelCase_ )
# First do baseline
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = setup['no']
model.to(lowerCamelCase_ )
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase_ )
with torch.inference_mode():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCamelCase_ , references=batch['labels'] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ : str = batch['labels']
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCamelCase_ , references=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator(split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(lowerCamelCase_ , lowerCamelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator(split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ )
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(lowerCamelCase_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
test_torch_metrics(lowerCamelCase_ , 5_12 )
accelerator.state._reset_state()
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
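# Illustrative usage sketch (added; the data path and tokenizer are placeholders):
#
#   processor = hans_processors["hans"]()
#   examples = processor.get_dev_examples("path/to/hans")
#   features = hans_convert_examples_to_features(
#       examples, processor.get_labels(), 128, tokenizer
#   )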
| 685 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
SCREAMING_SNAKE_CASE_ : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' )
return image
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = val
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(lowerCamelCase_ , requires_grad=lowerCamelCase_ ), v_bias) )
SCREAMING_SNAKE_CASE_ : Optional[int] = qkv_bias
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3_64 if 'coco' in model_name else 2_24
SCREAMING_SNAKE_CASE_ : Tuple = BlipaVisionConfig(image_size=lowerCamelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
SCREAMING_SNAKE_CASE_ : Any = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "t5-xl" in model_name:
SCREAMING_SNAKE_CASE_ : Tuple = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = BlipaConfig(vision_config=lowerCamelCase_ , text_config=lowerCamelCase_ )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : int=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
SCREAMING_SNAKE_CASE_ : int = tokenizer('\n' , add_special_tokens=lowerCamelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = get_blipa_config(lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlipaForConditionalGeneration(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
SCREAMING_SNAKE_CASE_ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_model_and_preprocess(
name=lowerCamelCase_ , model_type=lowerCamelCase_ , is_eval=lowerCamelCase_ , device=lowerCamelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
SCREAMING_SNAKE_CASE_ : Tuple = original_model.state_dict()
SCREAMING_SNAKE_CASE_ : List[str] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.pop(lowerCamelCase_ )
if key.startswith('Qformer.bert' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('self' , 'attention' )
if "opt_proj" in key:
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('t5' , 'language' )
SCREAMING_SNAKE_CASE_ : int = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = hf_model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
SCREAMING_SNAKE_CASE_ : int = load_demo_image()
SCREAMING_SNAKE_CASE_ : List[Any] = vis_processors['eval'](lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
# create processor
SCREAMING_SNAKE_CASE_ : str = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlipaProcessor(image_processor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' ).pixel_values.to(lowerCamelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
hf_model.to(lowerCamelCase_ )
with torch.no_grad():
if "opt" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
SCREAMING_SNAKE_CASE_ : Tuple = hf_model(lowerCamelCase_ , lowerCamelCase_ ).logits
else:
SCREAMING_SNAKE_CASE_ : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
SCREAMING_SNAKE_CASE_ : str = hf_model(lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowerCamelCase_ )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowerCamelCase_ )
else:
# cast to same type
SCREAMING_SNAKE_CASE_ : Any = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase_ ) , lowerCamelCase_ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
SCREAMING_SNAKE_CASE_ : List[str] = ''
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowerCamelCase_ , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_model.generate({'image': original_pixel_values} )
SCREAMING_SNAKE_CASE_ : str = hf_model.generate(
lowerCamelCase_ , lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = input_ids.shape[1]
SCREAMING_SNAKE_CASE_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = [text.strip() for text in output_text]
print('HF generation:' , lowerCamelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
UpperCamelCase__ : Optional[int] = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCamelCase__ : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
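    # Example invocation (added; the script filename and output path are placeholders,
    # the flags come from the argparse definition above):
    #
    #   python convert_blip_2_original_to_pytorch.py \
    #       --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b \
    #       --push_to_hub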
| 685 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
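    # Semantics recap (added, derived from the parametrization above): with the default
    # IN_MEMORY_MAX_SIZE of 0 the size check is disabled, so is_small_dataset(...) returns
    # False and the dataset stays memory-mapped instead of being loaded into RAM.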
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 1_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = n * (n + 1) * (2 * n + 1) / 6
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
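    # Sanity check (added): for n = 10 the difference is 3025 - 385 = 2640, and for the
    # default n = 100 the function returns 25164150 (the classic Project Euler 6 answer).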
| 685 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
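    # Worked example (added; standard silicon textbook values, assumed for illustration):
    # with donor_conc = acceptor_conc = 1e17 cm^-3 and intrinsic_conc = 1.5e10 cm^-3 at
    # T = 300 K, kT/q ~= 25.85 mV and ln(N_d * N_a / n_i**2) ~= 31.4, so the built-in
    # voltage comes out to roughly 0.81 V.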
| 685 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase__ : str = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with a fixed set of priorities (0 is highest); FIFO within each priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self) -> int:
        # Serve the highest-priority non-empty queue first, FIFO within it.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self):
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demonstrate FixedPriorityQueue."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """Demonstrate ElementPriorityQueue."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 685 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
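# e.g. solution(10) == 23 (= 3 + 5 + 6 + 9); the default sums multiples below 1000.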
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield primes in order using an incremental (lazy) Sieve of Eratosthenes."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor to the next unclaimed multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: schedule its square as the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1E10) -> int:
    """Return the least odd index n whose prime remainder first exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
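# Project Euler 123: for the n-th prime p_n, the remainder of (p_n - 1)^n + (p_n + 1)^n
# divided by p_n**2 equals 2 * n * p_n when n is odd, which is the quantity tested above.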
if __name__ == "__main__":
print(solution())
| 685 |
from ..utils import DummyObject, requires_backends
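# Each class below is a placeholder for a Flax object: the ["flax"] backend requirement
# makes constructing one (or calling its classmethods) raise an informative ImportError
# unless `flax` is installed.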
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 1 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (pass the unknown one as 0), return the missing quantity as a named result, using power = voltage * current."""
    result = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
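# e.g. electric_power(voltage=0, current=2, power=5) returns result(name='voltage', value=2.5).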
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
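# This custom format makes `tabulate` emit pipe-delimited rows that render as a table
# in Slack/GitHub-flavored markdown.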
failed = []
group_info = []
no_error_payload = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
if line.get('''nodeid''', '''''') != "":
                test = line['''nodeid''']
if line.get('''duration''', None) is not None:
                    duration = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ''''''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('''::''')
                data[0] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
        err = '''Too many failed tests, please see the full report in the Action results.'''
        offset = len(err) + 10
        message = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
    message = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
    action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
    ts = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
                    row[0] = ''''''
            payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 1 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the apparent power (as a complex number of volt-amperes) from voltage and current magnitudes and their phase angles in degrees."""
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
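# e.g. apparent_power(100, 5, 0, 0) == (500+0j); nonzero phase angles yield a complex result.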
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of `number` (0 when number == 0)."""
    if not isinstance(number, int):
        raise TypeError('Input value must be an \'int\' type' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
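# e.g. get_highest_set_bit_position(8) == 4, since 8 == 0b1000.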
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """Return the sum of all numbers equal to the sum of the factorials of their digits (Project Euler 34)."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
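# The only such numbers are 145 and 40585, so solution() returns 40730.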
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Prepare a GHZ (maximally entangled) state on `qubits` qubits and return the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
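# For a GHZ state the counts concentrate on the all-zeros and all-ones bitstrings,
# e.g. roughly {'000': ~500, '111': ~500} for 3 qubits and 1000 shots.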
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 1 |
from __future__ import annotations
UpperCamelCase__ : List[Any] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Map each reachable vertex to its parent in the breadth-first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to target_vertex as 'A->B->...'; requires breath_first_search() to have run."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F'->{target_vertex}'
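# With the sample graph above, g.shortest_path("D") returns "A->B->D" and
# g.shortest_path("G") returns "A->C->G"; the unknown vertex "Foo" raises ValueError.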
if __name__ == "__main__":
UpperCamelCase__ : Tuple = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 685 |
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy: its digits are neither fully increasing nor fully decreasing."""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches `percent`%."""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
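# The proportion of bouncy numbers first reaches 50% at 538 and 90% at 21780 (Project Euler 112).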
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared transformers pytest options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Emit the --make-reports summary if requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 685 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ : Dict = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685 |
def actual_power(a: int, b: int) -> int:
    """Return a**b for b >= 0 via divide-and-conquer (repeated squaring)."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half
def power(a: int, b: int) -> float:
    """Return a**b for any integer exponent, handling negatives via 1 / a**|b|."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
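# e.g. actual_power(2, 10) == 1024 and power(2, -3) == 0.125; power(-2, -3) prints -0.125 below.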
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 685 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
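# A rough sketch of how the variation matrix could be assembled (names here are
# hypothetical, not necessarily the script's actual variables):
#
#   dims = [variation.split("|") for variation in args.variations]
#   matrix = [" ".join(combo).strip() for combo in itertools.product(*dims)]
#
# e.g. ['--tf32 0|--tf32 1', '|--fp16|--bf16'] expands to the 6 combinations listed above.
#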
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCamelCase__ : List[Any] = float('''nan''')
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, 'a')

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r', '', msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, wrapped to `max_width` chars with shell escapes."""
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f'{key}={val}')
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/')[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd) > 0:
        current_line += f'{cmd.pop(0)} '
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ''
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+', ' ', args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+', '', args.base_cmd)
    args.base_cmd += f' --output_dir {output_dir}'
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+', '', args.base_cmd)
    args.base_cmd += ' --overwrite_output_dir'
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug this function without running the full benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print('STDOUT', result.stdout)
        print('STDERR', result.stderr)
    # save the streams
    prefix = variation.replace(' ', '-')
    with open(Path(output_dir) / f'log.{prefix}.stdout.txt', 'w') as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f'log.{prefix}.stderr.txt', 'w') as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print('failed')
        return {target_metric_key: nan}
    with io.open(f'{output_dir}/all_results.json', 'r', encoding='utf-8') as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : str , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = F'{id}: {variation:<{longest_variation_len}}'
SCREAMING_SNAKE_CASE_ : Optional[int] = F'{preamble}: '
SCREAMING_SNAKE_CASE_ : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(lowerCamelCase_ ) , desc=lowerCamelCase_ , leave=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = process_run_single(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = single_run_metrics[target_metric_key]
if not math.isnan(lowerCamelCase_ ):
metrics.append(lowerCamelCase_ )
results.append(lowerCamelCase_ )
outcome += "✓"
else:
outcome += "✘"
SCREAMING_SNAKE_CASE_ : Tuple = F'\33[2K\r{outcome}'
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE_ : Tuple = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
SCREAMING_SNAKE_CASE_ : List[Any] = round(mean_metrics[target_metric_key] , 2 )
SCREAMING_SNAKE_CASE_ : List[str] = F'{outcome} {mean_target}'
if len(lowerCamelCase_ ) > 1:
            results_str += F' {tuple(round(x , 2 ) for x in results )}'
print(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = variation
return mean_metrics
else:
print(lowerCamelCase_ )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch.cuda.get_device_properties(torch.device('cuda' ) )
return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = pd.DataFrame(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'variation'
SCREAMING_SNAKE_CASE_ : List[str] = 'diff_%'
SCREAMING_SNAKE_CASE_ : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
SCREAMING_SNAKE_CASE_ : Dict = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(lowerCamelCase_ ):
# as a fallback, use the minimal value as the sentinel
SCREAMING_SNAKE_CASE_ : Tuple = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = df.apply(
            lambda r : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='columns' , )
# re-order columns
SCREAMING_SNAKE_CASE_ : str = [variation_key, target_metric_key, diff_key, *report_metric_keys]
SCREAMING_SNAKE_CASE_ : List[str] = df.reindex(lowerCamelCase_ , axis='columns' ) # reorder cols
# capitalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = df.rename(str.capitalize , axis='columns' )
# make the cols as narrow as possible
    SCREAMING_SNAKE_CASE_ : Any = df.rename(lambda c : c.replace('_' , '<br>' ) , axis='columns' )
    SCREAMING_SNAKE_CASE_ : List[str] = df.rename(lambda c : c.replace('_' , '\n' ) , axis='columns' )
SCREAMING_SNAKE_CASE_ : List[str] = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )]
print('\n\n'.join(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = argparse.ArgumentParser()
parser.add_argument(
'--base-cmd' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Base cmd' , )
parser.add_argument(
'--variations' , default=lowerCamelCase_ , type=lowerCamelCase_ , nargs='+' , required=lowerCamelCase_ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
parser.add_argument(
'--base-variation' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
parser.add_argument(
'--target-metric-key' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
parser.add_argument(
        '--report-metric-keys' , default='' , type=lowerCamelCase_ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'' , )
parser.add_argument(
'--repeat-times' , default=1 , type=lowerCamelCase_ , help='How many times to re-run each variation - an average will be reported' , )
parser.add_argument(
'--output_dir' , default='output_benchmark' , type=lowerCamelCase_ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
parser.add_argument(
'--verbose' , default=lowerCamelCase_ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.output_dir
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = get_base_command(lowerCamelCase_ , lowerCamelCase_ )
# split each dimension into its --foo variations
    SCREAMING_SNAKE_CASE_ : str = [list(map(str.strip , re.split(R'\|' , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
SCREAMING_SNAKE_CASE_ : int = list(map(str.strip , map(' '.join , itertools.product(*lowerCamelCase_ ) ) ) )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = max(len(x ) for x in variations )
# split wanted keys
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args.report_metric_keys.split()
# capture prints into a log file for convenience
SCREAMING_SNAKE_CASE_ : Optional[Any] = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
print(F'and this script\'s output is also piped into {report_fn}' )
SCREAMING_SNAKE_CASE_ : List[str] = Tee(lowerCamelCase_ )
print(F'\n*** Running {len(lowerCamelCase_ )} benchmarks:' )
print(F'Base command: {" ".join(lowerCamelCase_ )}' )
SCREAMING_SNAKE_CASE_ : List[Any] = 'variation'
SCREAMING_SNAKE_CASE_ : Any = []
for id, variation in enumerate(tqdm(lowerCamelCase_ , desc='Total completion: ' , leave=lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE_ : Optional[int] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.repeat_times , lowerCamelCase_ , args.verbose , ) )
process_results(lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.base_variation , lowerCamelCase_ )
if __name__ == "__main__":
main()
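# A minimal, self-contained sketch (added for illustration; not part of the original
# script) of how the multi-dimensional --variations strings above expand into one
# command-line suffix per benchmark run. The dimension strings are assumptions taken
# from the argparse help text.
def _demo_expand_variations():
    import itertools
    import re
    dims = ["|--fp16|--bf16", "|--tf32"]
    # split each dimension on '|'; the leading empty entry keeps a "no flag" baseline
    parsed = [list(map(str.strip, re.split(r"\|", d))) for d in dims]
    # cartesian product across dimensions, joined back into one suffix per run
    return list(map(str.strip, map(" ".join, itertools.product(*parsed))))
    # -> ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']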
| 685 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(snake_case__ ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def snake_case ( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and therefore has a large vocabulary
return len(self.raw_vocab )
def snake_case ( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.tokenize(snake_case__ ,clean=self.do_clean_text )
def snake_case ( self ,snake_case__ ):
return self.vocab.get(snake_case__ ,self.vocab.get(self.unk_token ) )
def snake_case ( self ,snake_case__ ):
return self.subword_tokenizer.convert_id_to_token(snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = ''.join(snake_case__ ).strip()
return out_string
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ ,add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE_ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Dict = token_index
writer.write(','.join(snake_case__ ) + '\n' )
index += 1
with open(snake_case__ ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case__ )
return vocab_file, emoji_file
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = vocab # same as swe
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE_ : Dict = emoji
SCREAMING_SNAKE_CASE_ : int = np.max([len(snake_case__ ) for w in self.vocab.keys()] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : str = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
SCREAMING_SNAKE_CASE_ : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
SCREAMING_SNAKE_CASE_ : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
SCREAMING_SNAKE_CASE_ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<URL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.content_repattera.sub('<EMAIL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<TEL>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.content_repattera.sub('<DATE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.content_repattera.sub('<PRICE>' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
def snake_case ( self ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE_ : List[Any] = text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE_ : Optional[int] = text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE_ : int = text.replace(snake_case__ ,snake_case__ )
if clean:
SCREAMING_SNAKE_CASE_ : str = self.clean_text(snake_case__ )
def check_simbol(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(x ) == 1 and len(e ) == 2:
SCREAMING_SNAKE_CASE_ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2A1 and c <= 0XC2BF)
or (c >= 0XC780 and c <= 0XC783)
or (c >= 0XCAB9 and c <= 0XCBBF)
or (c >= 0XCC80 and c <= 0XCDA2)
):
return True
return False
def checkuae(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x.encode()
            if len(x ) == 1 and len(e ) == 3:
SCREAMING_SNAKE_CASE_ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_8080 and c <= 0XE2_B07F:
return True
return False
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
while pos < len(snake_case__ ):
            SCREAMING_SNAKE_CASE_ : List[Any] = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE_ : List[Any] = [] # (token_id, token, pos)
for e in range(snake_case__ ,snake_case__ ,-1 ):
SCREAMING_SNAKE_CASE_ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case__ ) > 2:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case__ ) > 0:
# the smallest token_id is adopted
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = sorted(snake_case__ ,key=lambda x : x[0] )[0]
result.append(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = e
else:
SCREAMING_SNAKE_CASE_ : Any = pos + 1
SCREAMING_SNAKE_CASE_ : Optional[int] = text[pos:end]
if check_simbol(snake_case__ ):
result.append('<KIGOU>' )
elif checkuae(snake_case__ ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE_ : int = end
return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
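# Minimal sketch (added for illustration; not part of the tokenizer above) of the
# greedy longest-match loop in SubWordJapaneseTokenizer.tokenize: at each position,
# try the longest candidate substring first and fall back to shorter ones. The toy
# vocabulary is an assumption chosen for demonstration.
def _demo_longest_match(text, vocab, maxlen=4):
    pos, result = 0, []
    while pos < len(text):
        end = min(len(text), pos + maxlen)
        for e in range(end, pos, -1):  # longest candidate first
            wd = text[pos:e]
            if wd in vocab:
                result.append(wd)
                pos = e
                break
        else:  # no vocabulary entry covers this character
            result.append(text[pos])
            pos += 1
    return result
# _demo_longest_match("lowlow", {"low", "lo", "w"}) -> ['low', 'low']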
| 685 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
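# Illustrative sketch (added; not part of the file above) of the dummy-object pattern:
# a metaclass whose classes raise a helpful ImportError on any access when an optional
# backend is missing. The names below are simplified assumptions, not the real
# DummyObject / requires_backends implementations.
class _DemoDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")
class _DemoFlaxModel(metaclass=_DemoDummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the flax library, which is not installed.")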
| 685 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.01' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowerCamelCase_ , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
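# A minimal sketch (added for illustration) of what a patch_environment-style helper
# is assumed to do: temporarily set environment variables for the forked workers and
# restore the previous values afterwards. This is not the real implementation.
import contextlib
@contextlib.contextmanager
def _demo_patch_environment(**kwargs):
    previous = {k.upper(): os.environ.get(k.upper()) for k in kwargs}
    os.environ.update({k.upper(): str(v) for k, v in kwargs.items()})
    try:
        yield
    finally:
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)  # the variable did not exist before
            else:
                os.environ[key] = value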
| 685 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> Any:
"""simple docstring"""
stooge(lowerCamelCase_ , 0 , len(lowerCamelCase_ ) - 1 )
return arr
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ) -> Any:
"""simple docstring"""
if i >= h:
return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        SCREAMING_SNAKE_CASE_ : Dict = int((h - i + 1) / 3 )
        # Recursively sort the first 2/3 of the elements
        stooge(lowerCamelCase_ , lowerCamelCase_ , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(lowerCamelCase_ , i + t , (h) )
        # Recursively sort the first 2/3 of the elements again
        stooge(lowerCamelCase_ , lowerCamelCase_ , (h - t) )
if __name__ == "__main__":
UpperCamelCase__ : str = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ : str = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
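# Complexity note (added for illustration): stooge sort satisfies the recurrence
# T(n) = 3 * T(2n / 3) + O(1), which solves to O(n ** (log 3 / log 1.5)) ≈ O(n ** 2.71),
# so it is asymptotically slower than bubble sort and is of purely pedagogical interest.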
| 685 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
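# Illustrative sketch (added; not part of the original file) of the lazy-import idea
# behind _LazyModule: defer the real import until an attribute is first requested,
# which is the same hook PEP 562 module-level __getattr__ provides. Names are
# assumptions for demonstration.
def _demo_lazy_getattr(name):
    if name == "TapexTokenizer":
        from .tokenization_tapex import TapexTokenizer  # imported only on first access
        return TapexTokenizer
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")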
| 685 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowerCAmelCase_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1.0 ,snake_case__ = None ,):
super().__init__()
SCREAMING_SNAKE_CASE_ : str = initial_learning_rate
SCREAMING_SNAKE_CASE_ : int = warmup_steps
SCREAMING_SNAKE_CASE_ : int = power
SCREAMING_SNAKE_CASE_ : Optional[Any] = decay_schedule_fn
SCREAMING_SNAKE_CASE_ : Union[str, Any] = name
def __call__( self ,snake_case__ ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
SCREAMING_SNAKE_CASE_ : Dict = tf.cast(snake_case__ ,tf.floataa )
SCREAMING_SNAKE_CASE_ : Dict = tf.cast(self.warmup_steps ,tf.floataa )
SCREAMING_SNAKE_CASE_ : Optional[int] = global_step_float / warmup_steps_float
SCREAMING_SNAKE_CASE_ : str = self.initial_learning_rate * tf.math.pow(snake_case__ ,self.power )
return tf.cond(
global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=snake_case__ ,)
def snake_case ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 0.9 , lowerCamelCase_ : float = 0.9_9_9 , lowerCamelCase_ : float = 1E-8 , lowerCamelCase_ : Optional[float] = None , lowerCamelCase_ : Optional[float] = None , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[List[str]] = None , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=lowerCamelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=lowerCamelCase_ , )
if num_warmup_steps:
SCREAMING_SNAKE_CASE_ : Tuple = WarmUp(
initial_learning_rate=lowerCamelCase_ , decay_schedule_fn=lowerCamelCase_ , warmup_steps=lowerCamelCase_ , )
if weight_decay_rate > 0.0:
SCREAMING_SNAKE_CASE_ : Dict = AdamWeightDecay(
learning_rate=lowerCamelCase_ , weight_decay_rate=lowerCamelCase_ , beta_a=lowerCamelCase_ , beta_a=lowerCamelCase_ , epsilon=lowerCamelCase_ , clipnorm=lowerCamelCase_ , global_clipnorm=lowerCamelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=lowerCamelCase_ , )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.keras.optimizers.Adam(
learning_rate=lowerCamelCase_ , beta_a=lowerCamelCase_ , beta_a=lowerCamelCase_ , epsilon=lowerCamelCase_ , clipnorm=lowerCamelCase_ , global_clipnorm=lowerCamelCase_ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ = 0.001 ,snake_case__ = 0.9 ,snake_case__ = 0.999 ,snake_case__ = 1E-7 ,snake_case__ = False ,snake_case__ = 0.0 ,snake_case__ = None ,snake_case__ = None ,snake_case__ = "AdamWeightDecay" ,**snake_case__ ,):
super().__init__(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = weight_decay_rate
SCREAMING_SNAKE_CASE_ : Tuple = include_in_weight_decay
SCREAMING_SNAKE_CASE_ : Union[str, Any] = exclude_from_weight_decay
@classmethod
def snake_case ( cls ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = {'WarmUp': WarmUp}
return super(snake_case__ ,cls ).from_config(snake_case__ ,custom_objects=snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
super(snake_case__ ,self )._prepare_local(snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.constant(
self.weight_decay_rate ,name='adam_weight_decay_rate' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] ,use_locking=self._use_locking ,)
return tf.no_op()
def snake_case ( self ,snake_case__ ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = list(zip(*snake_case__ ) )
return super(snake_case__ ,self ).apply_gradients(zip(snake_case__ ,snake_case__ ) ,name=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = apply_state or {}
SCREAMING_SNAKE_CASE_ : str = apply_state.get((var_device, var_dtype) )
if coefficients is None:
SCREAMING_SNAKE_CASE_ : str = self._fallback_apply_state(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_lr(var.device ,var.dtype.base_dtype ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = self._decay_weights_op(snake_case__ ,snake_case__ ,snake_case__ )
with tf.control_dependencies([decay] ):
return super(snake_case__ ,self )._resource_apply_dense(snake_case__ ,snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self._get_lr(var.device ,var.dtype.base_dtype ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self._decay_weights_op(snake_case__ ,snake_case__ ,snake_case__ )
with tf.control_dependencies([decay] ):
return super(snake_case__ ,self )._resource_apply_sparse(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def snake_case ( self ,snake_case__ ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(snake_case__ ,snake_case__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(snake_case__ ,snake_case__ ) is not None:
return False
return True
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : List[Any] = None
@property
def snake_case ( self ):
if self._accum_steps is None:
SCREAMING_SNAKE_CASE_ : Tuple = tf.Variable(
tf.constant(0 ,dtype=tf.intaa ) ,trainable=snake_case__ ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
return self._accum_steps.value()
@property
def snake_case ( self ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self ,snake_case__ ):
if not self._gradients:
SCREAMING_SNAKE_CASE_ : Tuple = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(snake_case__ ) ,trainable=snake_case__ ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
if gradient is not None
else gradient
for gradient in gradients
] )
if len(snake_case__ ) != len(self._gradients ):
raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(snake_case__ )}' )
for accum_gradient, gradient in zip(self._gradients ,snake_case__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(snake_case__ )
self._accum_steps.assign_add(1 )
def snake_case ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(snake_case__ ) )
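# A short worked example (added for illustration) of the warmup schedule implemented
# above: while step < warmup_steps the learning rate follows
#     lr(step) = initial_learning_rate * (step / warmup_steps) ** power
# and afterwards it hands off to decay_schedule_fn(step - warmup_steps).
# The constants below are demo assumptions, not values from this file.
def _demo_warmup_lr(step, initial_learning_rate=1e-3, warmup_steps=100, power=1.0):
    if step < warmup_steps:
        return initial_learning_rate * (step / warmup_steps) ** power
    return initial_learning_rate  # stand-in for a flat decay_schedule_fn
# _demo_warmup_lr(50) -> 5e-4 (halfway through a linear warmup)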
| 685 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
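# Simplified sketch (added for illustration; not the real CLIP BPE) of how the toy
# merge rules in setUp() tokenize "lower": starting from characters plus the
# end-of-word marker, the lowest-ranked adjacent pair is merged repeatedly. Unlike
# the real tokenizer, this version merges one occurrence per iteration, which is
# enough for the toy example.
def _demo_apply_merges(symbols, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        pairs = [(ranks.get((a, b), float("inf")), i) for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            return symbols
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]
# _demo_apply_merges(['l', 'o', 'w', 'e', 'r</w>'], ['l o', 'lo w</w>', 'e r</w>'])
# -> ['lo', 'w', 'er</w>'], matching the expected tokens in the test above.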
| 685 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any]=False ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int]=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
else:
SCREAMING_SNAKE_CASE_ : Tuple = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ : Tuple = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ : int = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ : Dict = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ : Dict = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = val
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ViTMSNConfig()
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10_00
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'datasets/huggingface/label-files'
SCREAMING_SNAKE_CASE_ : int = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ : Dict = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ ) , 'r' ) )
SCREAMING_SNAKE_CASE_ : Tuple = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE_ : Any = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : List[str] = 3_84
SCREAMING_SNAKE_CASE_ : Any = 15_36
SCREAMING_SNAKE_CASE_ : Tuple = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : List[Any] = 10_24
SCREAMING_SNAKE_CASE_ : Dict = 40_96
SCREAMING_SNAKE_CASE_ : Optional[Any] = 24
SCREAMING_SNAKE_CASE_ : Any = 16
SCREAMING_SNAKE_CASE_ : int = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Tuple = 7
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10_24
SCREAMING_SNAKE_CASE_ : Optional[int] = 40_96
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 24
SCREAMING_SNAKE_CASE_ : Any = 16
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1
SCREAMING_SNAKE_CASE_ : int = ViTMSNModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['target_encoder']
SCREAMING_SNAKE_CASE_ : Dict = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = create_rename_keys(lowerCamelCase_ , base_model=lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , base_model=lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
SCREAMING_SNAKE_CASE_ : Any = ViTImageProcessor(
size=config.image_size , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(images=lowerCamelCase_ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ : Any = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCamelCase_ , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
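# Minimal sketch (added for illustration) of the fused-QKV split performed in
# read_in_q_k_v() above: timm-style checkpoints store query/key/value as a single
# (3 * hidden, hidden) matrix, which maps onto three (hidden, hidden) projections.
def _demo_split_qkv(in_proj_weight, in_proj_bias, hidden_size):
    q_w = in_proj_weight[:hidden_size, :]
    k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_w = in_proj_weight[-hidden_size:, :]
    q_b = in_proj_bias[:hidden_size]
    k_b = in_proj_bias[hidden_size : 2 * hidden_size]
    v_b = in_proj_bias[-hidden_size:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)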
| 685 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
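# Illustrative sketch (added; not part of the original file) of the
# register_subcommand pattern used above: each command class attaches its own
# sub-parser and binds a factory via set_defaults(func=...), which main() then
# calls before invoking .run(). The names below are assumptions.
class _DemoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("demo", help="Example subcommand")
        parser.add_argument("--name", type=str, default="world")
        parser.set_defaults(func=lambda args: _DemoCommand(args.name))
    def __init__(self, name):
        self.name = name
    def run(self):
        print(f"hello {self.name}")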
| 685 | 1 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_:
    """Undirected weighted graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge between `u_node` and `v_node` with the given `weight`."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the root of the component that `u_node` belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-point every node directly at the root of its component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of `u_node` and `v_node`, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print the minimum spanning tree using Boruvka's algorithm."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}')
def test_boruvka() -> None:
    """Hook for doctests; the demo under the main guard below shows a full run."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
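    # Illustrative run (a sketch; the sample graph and weights below are arbitrary
    # demo data, not part of the original file):
    g = lowerCAmelCase_(5)
    for u_node, v_node, weight in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4), (3, 4, 8)):
        g.add_edge(u_node, v_node, weight)
    g.boruvka()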
| 685 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released `diffusers` versions, sorted from oldest to newest."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for dynamic modules, with an `__init__.py`, and add it to `sys.path`."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a dynamic module `name` inside the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the list of modules that `module_file` imports with relative imports."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
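# For instance (illustrative check of the regexes above):
#   >>> re.findall(r"^\s*from\s+\.(\S+)\s+import", "from .utils import helper", flags=re.MULTILINE)
#   ['utils']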
def get_relative_import_files(module_file):
    """Return all files needed by `module_file`, following relative imports recursively."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that the current Python environment contains all the libraries imported in `filename`."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the dynamic-module cache and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the single pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download `module_file` from `pretrained_model_name_or_path`, cache it locally, and return its path."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in a local folder or in a repo on the Hub."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )

    return get_class_in_module(class_name, final_module.replace(".py", ""))
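# Illustrative usage (a sketch; assumes a community pipeline file named
# `clip_guided_stable_diffusion.py` exists for the resolved revision):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion",
#       module_file="clip_guided_stable_diffusion.py",
#   )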
| 685 | 1 |
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Solve for the one unknown among shear stress, tangential force and area,
    using tau = F / A. Exactly one of the three arguments must be 0 and is
    treated as the unknown.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
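    # Worked example (illustrative numbers): a 100 N tangential force over a
    # 2 m^2 area gives a shear stress of 50 Pa.
    print(shear_stress(stress=0, tangential_force=100, area=2))  # ('stress', 50.0)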
| 685 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
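# Illustrative usage (a sketch; `lowerCAmelCase_` is the config class defined above,
# keeping this file's anonymized class name):
#
#   config = lowerCAmelCase_(visual_embedding_dim=1024, num_hidden_layers=6)
#   assert config.model_type == "visual_bert"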
| 685 | 1 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class lowerCAmelCase_(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 685 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    Estimate pi by checking how many of `iterations` random points in the
    square [-1, 1] x [-1, 1] land inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
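# Rationale (standard Monte Carlo integration): for U ~ Uniform(a, b),
# E[f(U)] = (1 / (b - a)) * integral_a^b f(x) dx, so
# (b - a) * mean(f(U_i)) is an unbiased estimate of the integral.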
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose exact integral is known in closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) for x in [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
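    # Illustrative run (the iteration count is an arbitrary sample size;
    # output varies from run to run):
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)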
| 685 | 1 |